[to #348] Add javadoc (#447)

Signed-off-by: Peng Guanwen <pg999w@outlook.com>
Authored by Peng Guanwen, 2021-12-28 09:33:26 +08:00; committed by GitHub
parent b94b3cbce7
commit 41d11ff035
9 changed files with 28 additions and 22 deletions

(GitHub Actions workflow)

@@ -21,6 +21,15 @@ jobs:
       - run: mdbook build ./docs
+      - name: Set up JDK 8
+        uses: actions/setup-java@v2
+        with:
+          java-version: '8'
+          distribution: 'adopt'
+      - name: Javadoc
+        run: mvn clean javadoc:javadoc -Djavadoc.skip=false && mv ./target/site/apidocs ./docs/book/apidocs
       - name: Deploy
         uses: peaceiris/actions-gh-pages@v3
         if: ${{ github.ref == 'refs/heads/master' }}

.gitignore

@@ -73,4 +73,7 @@ out/
 # gradle/wrapper/gradle-wrapper.properties
 # vscode
 .settings
+
+# mdbook
+docs/book

README.md

@@ -50,9 +50,11 @@ Find more demo in [KVRawClientTest](https://github.com/birdstorm/KVRawClientTest
 See [Java Client Documents](/docs/README.md) for references about how to config and monitor Java Client.
+An [API reference](https://tikv.github.io/client-java/apidocs) is also available.
 
 ## Community
 
-See [Contribution Guide](/docs/dev-guide.md) for references about how to contribute to this project.
+See [Contribution Guide](https://tikv.github.io/client-java/contribution/introduction.html) for references about how to contribute to this project.
 
 ## License

PartAndFilterExprRewriter.java

@@ -26,9 +26,9 @@ import org.tikv.common.predicates.PredicateUtils;
  * PartAndFilterExprRewriter takes partition expression as an input. Rewriting rule is based on the
  * type of partition expression. 1. If partition expression is a columnRef, no rewriting will be
  * performed. 2. If partition expression is year and the expression to be rewritten in the form of y
- * < '1995-10-10' then its right hand child will be replaced with "1995". 3. If partition expression
- * is year and the expression to be rewritten in the form of year(y) < '1995' then its left hand
- * child will be replaced with y.
+ * &lt; '1995-10-10' then its right hand child will be replaced with "1995". 3. If partition
+ * expression is year and the expression to be rewritten in the form of year(y) &lt; '1995' then its
+ * left hand child will be replaced with y.
  */
 public class PartAndFilterExprRewriter extends DefaultVisitor<Expression, Void> {
   private final Expression partExpr;
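Why the escaping above is needed: the javadoc tool parses comment bodies as HTML, so a bare < opens a tag, and doclint (enabled by default since JDK 8) fails the build on the malformed markup. A minimal hypothetical sketch of the pattern; the class and method names below are invented for illustration:

/** Hypothetical example; not part of client-java. */
public class EscapingSketch {
  // A doc comment reading "y < '1995-10-10'" would trip doclint, which
  // treats the bare '<' as the start of a malformed HTML tag; writing
  // &lt; keeps the rendered text intact.

  /** Rewrites predicates of the form y &lt; '1995-10-10' to compare against "1995". */
  public void rewrite() {}
}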

ImporterStoreClient.java

@@ -106,8 +106,6 @@ public class ImporterStoreClient<RequestClass, ResponseClass>
   /**
    * Ingest KV pairs to RawKV/Txn using gRPC streaming mode. This API should be called on both
    * leader and followers.
-   *
-   * @return
    */
   public void startWrite() {
     if (conf.isRawKVMode()) {

DataType.java

@@ -368,19 +368,19 @@ public abstract class DataType implements Serializable {
   /**
    * Convert from Spark SQL Supported Java Type to TiDB Type
    *
-   * <p>1. data convert, e.g. Integer -> SHORT
+   * <p>1. data convert, e.g. Integer -&gt; SHORT
    *
    * <p>2. check overflow, e.g. write 1000 to short
    *
    * <p>Spark SQL only support following types:
    *
-   * <p>1. BooleanType -> java.lang.Boolean 2. ByteType -> java.lang.Byte 3. ShortType ->
+   * <p>{@literal 1. BooleanType -> java.lang.Boolean 2. ByteType -> java.lang.Byte 3. ShortType ->
    * java.lang.Short 4. IntegerType -> java.lang.Integer 5. LongType -> java.lang.Long 6. FloatType
    * -> java.lang.Float 7. DoubleType -> java.lang.Double 8. StringType -> String 9. DecimalType ->
    * java.math.BigDecimal 10. DateType -> java.sql.Date 11. TimestampType -> java.sql.Timestamp 12.
    * BinaryType -> byte array 13. ArrayType -> scala.collection.Seq (use getList for java.util.List)
    * 14. MapType -> scala.collection.Map (use getJavaMap for java.util.Map) 15. StructType ->
-   * org.apache.spark.sql.Row
+   * org.apache.spark.sql.Row }
    *
    * @param value
    * @return

RangeSplitter.java

@@ -49,7 +49,7 @@ public class RangeSplitter {
    *
    * @param tableId Table id used for the handle
    * @param handles Handle list
-   * @return <Region, HandleList> map
+   * @return {@code <Region, HandleList>} map
    */
   public Map<Pair<TiRegion, TiStore>, TLongArrayList> groupByAndSortHandlesByRegionId(
       long tableId, TLongArrayList handles) {
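Two inline tags are used in this commit for generic types like <Region, HandleList>: both {@code} and {@literal} stop javadoc from interpreting <, >, and & as HTML, and {@code} additionally renders its content in monospace code font. A hypothetical illustration with invented names:

/** Hypothetical example; not part of client-java. */
public class InlineTagSketch {
  /**
   * Returns a {@code Map<String, Long>}: code font, angle brackets kept.
   * By contrast {@literal a < b && b < c} renders in plain font, but both
   * tags are equally safe for the generated HTML.
   *
   * @return an empty {@code Map<String, Long>}
   */
  public java.util.Map<String, Long> example() {
    return new java.util.HashMap<>();
  }
}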

TwoPhaseCommitter.java

@@ -140,7 +140,6 @@ public class TwoPhaseCommitter {
    * @param backOffer
    * @param primaryKey
    * @param value
-   * @return
    */
   public void prewritePrimaryKey(BackOffer backOffer, byte[] primaryKey, byte[] value)
       throws TiBatchWriteException {
@@ -196,7 +195,6 @@ public class TwoPhaseCommitter {
    *
    * @param backOffer
    * @param key
-   * @return
    */
   public void commitPrimaryKey(BackOffer backOffer, byte[] key, long commitTs)
       throws TiBatchWriteException {
@@ -236,7 +234,6 @@ public class TwoPhaseCommitter {
    *
    * @param primaryKey
    * @param pairs
-   * @return
    */
   public void prewriteSecondaryKeys(
       byte[] primaryKey, Iterator<BytePairWrapper> pairs, int maxBackOfferMS)
@@ -499,7 +496,6 @@ public class TwoPhaseCommitter {
    *
    * @param keys
    * @param commitTs
-   * @return
    */
   public void commitSecondaryKeys(
       Iterator<org.tikv.common.ByteWrapper> keys, long commitTs, int commitBackOfferMS)
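The @return deletions in this file (and in ImporterStoreClient above) fix another javadoc complaint: an @return tag on a method declared void is rejected, and an empty one carries no information anyway. A hypothetical sketch of the cleaned-up shape, with invented names:

/** Hypothetical example; not part of client-java. */
public class VoidReturnSketch {
  // Adding "@return" to the doc comment below would make javadoc
  // complain, because the method is declared void.
  /**
   * Commits the given key.
   *
   * @param key the key to commit
   */
  public void commit(byte[] key) {}
}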

TxnStatus.java

@@ -19,13 +19,11 @@ package org.tikv.txn;
 
 import org.tikv.kvproto.Kvrpcpb;
 
-/**
- * ttl > 0: lock is not resolved
- *
- * <p>ttl = 0 && commitTS = 0: lock is deleted
- *
- * <p>ttl = 0 && commitTS > 0: lock is committed
- */
+// ttl > 0: lock is not resolved
+//
+// <p>ttl = 0 && commitTS = 0: lock is deleted
+//
+// <p>ttl = 0 && commitTS > 0: lock is committed
 public class TxnStatus {
   private long ttl;
   private long commitTS;
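This last hunk takes a different route: instead of escaping > and &&, it demotes the notes to line comments, which the javadoc tool ignores entirely; that fits here because the notes describe internal state encoding rather than public API. A hypothetical sketch of the same trade-off (class and method names invented):

// Hypothetical example; not part of client-java. Implementation notes kept
// as line comments need no HTML escaping, because javadoc never sees them:
//   ttl > 0                   : lock is not resolved
//   ttl == 0 && commitTS == 0 : lock is deleted
//   ttl == 0 && commitTS > 0  : lock is committed
class TxnStatusSketch {
  private long ttl;
  private long commitTS;

  boolean isCommitted() {
    return ttl == 0 && commitTS > 0;
  }
}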