mirror of https://github.com/tikv/client-java.git
Compare commits
31 Commits
| Author | SHA1 | Date |
|---|---|---|
| | ffaf6ab43c | |
| | d9a834b521 | |
| | 7ce6c4d1bc | |
| | b8c6b4b740 | |
| | d0b4965ce5 | |
| | 8b61dc06b1 | |
| | 4bd82827fa | |
| | 814322e8e1 | |
| | e02e61c412 | |
| | 8210eaf94d | |
| | ccdce8f247 | |
| | a4081e5bf8 | |
| | 2aa58459fc | |
| | 81d4981c80 | |
| | 41b24bb877 | |
| | 05f1559eab | |
| | c7d9ff151b | |
| | 3aa9a5665b | |
| | 35493c468a | |
| | 031745b41b | |
| | 7f468277c3 | |
| | 71de93d73d | |
| | 2913008410 | |
| | 6a5ea6fb8b | |
| | 01b391ff2f | |
| | 490b4a1e01 | |
| | c6398badfe | |
| | 4db2ac1ad7 | |
| | cd3ddc121a | |
| | 06da8d0830 | |
| | 131693bf8a | |
@ -9,6 +9,7 @@ def call(ghprbActualCommit, ghprbPullId, ghprbPullTitle, ghprbPullLink, ghprbPul
|
|||
if (m1) {
|
||||
TIDB_BRANCH = "${m1[0][1]}"
|
||||
}
|
||||
m1 = null
|
||||
println "TIDB_BRANCH=${TIDB_BRANCH}"
|
||||
|
||||
// parse pd branch
|
||||
|
@ -16,6 +17,7 @@ def call(ghprbActualCommit, ghprbPullId, ghprbPullTitle, ghprbPullLink, ghprbPul
|
|||
if (m2) {
|
||||
PD_BRANCH = "${m2[0][1]}"
|
||||
}
|
||||
m2 = null
|
||||
println "PD_BRANCH=${PD_BRANCH}"
|
||||
|
||||
// parse tikv branch
|
||||
|
@ -23,6 +25,7 @@ def call(ghprbActualCommit, ghprbPullId, ghprbPullTitle, ghprbPullLink, ghprbPul
|
|||
if (m3) {
|
||||
TIKV_BRANCH = "${m3[0][1]}"
|
||||
}
|
||||
m3 = null
|
||||
println "TIKV_BRANCH=${TIKV_BRANCH}"
|
||||
|
||||
catchError {
|
||||
|
@ -61,13 +64,13 @@ def call(ghprbActualCommit, ghprbPullId, ghprbPullTitle, ghprbPullLink, ghprbPul
|
|||
killall -9 pd-server || true
|
||||
killall -9 java || true
|
||||
sleep 10
|
||||
bin/pd-server --name=pd --data-dir=pd --config=../.ci/config/pd.toml &>pd.log &
|
||||
bin/pd-server --name=pd --data-dir=pd --config=../config/pd.toml &>pd.log &
|
||||
sleep 10
|
||||
bin/tikv-server --pd=127.0.0.1:2379 -s tikv --addr=0.0.0.0:20160 --advertise-addr=127.0.0.1:20160 --config=../.ci/config/tikv.toml &>tikv.log &
|
||||
bin/tikv-server --pd=127.0.0.1:2379 -s tikv --addr=0.0.0.0:20160 --advertise-addr=127.0.0.1:20160 --config=../config/tikv.toml &>tikv.log &
|
||||
sleep 10
|
||||
ps aux | grep '-server' || true
|
||||
curl -s 127.0.0.1:2379/pd/api/v1/status || true
|
||||
bin/tidb-server --store=tikv --path="127.0.0.1:2379" --config=../.ci/config/tidb.toml &>tidb.log &
|
||||
bin/tidb-server --store=tikv --path="127.0.0.1:2379" --config=../config/tidb.toml &>tidb.log &
|
||||
sleep 60
|
||||
"""
|
||||
}
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: "[BUG] Title of Bug Report"
|
||||
labels: type/bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
<!-- A clear and concise description of what the bug is. -->
|
||||
|
||||
**What did you do**
|
||||
<!--
|
||||
If possible, please provide a code snippet to reproduce this issue.
|
||||
-->
|
||||
|
||||
**What do you expect**
|
||||
<!-- A clear and concise description of what you expected to happen. -->
|
||||
|
||||
**What happens instead**
|
||||
<!-- If an error occurs, please provide complete error stack. -->
|
||||
|
||||
<!--
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
-->
|
||||
|
||||
**Java Client and TiDB/TiKV version info**
|
||||
<!-- What versions of the Java Client and TiDB/TiKV are you using? -->
|
||||
|
||||
<!--
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
||||
You may also provide TiDB version here if it is related to the issue.
|
||||
-->
|
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: type/feature-request
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
|
@ -0,0 +1,33 @@
|
|||
### What problem does this PR solve? <!--add issue link with summary if exists-->
|
||||
|
||||
|
||||
### What is changed and how it works?
|
||||
|
||||
|
||||
### Check List <!--REMOVE the items that are not applicable-->
|
||||
|
||||
Tests <!-- At least one of them must be included. -->
|
||||
|
||||
- Unit test
|
||||
- Integration test
|
||||
- Manual test (add detailed scripts or steps below)
|
||||
- No code
|
||||
|
||||
Code changes
|
||||
|
||||
- Has exported function/method change
|
||||
- Has exported variable/fields change
|
||||
- Has interface methods change
|
||||
- Has persistent data change
|
||||
|
||||
Side effects
|
||||
|
||||
- Possible performance regression
|
||||
- Increased code complexity
|
||||
- Breaking backward compatibility
|
||||
|
||||
Related changes
|
||||
|
||||
- Need to cherry-pick to the release branch
|
||||
- Need to update the documentation
|
||||
- Need to be included in the release note
|
188
README.md
|
@ -27,7 +27,7 @@ The jar can be found in `./target/`
|
|||
|
||||
## Usage
|
||||
|
||||
This project is designed to hook with `pd` and `tikv` which you can find in `PingCAP` github page.
|
||||
This project is designed to hook with [pd](https://github.com/tikv/pd) and [tikv](https://github.com/tikv/tikv).
|
||||
|
||||
When you work with this project, you have to communicate with `pd` and `tikv`. Please run TiKV and PD in advance.
|
||||
|
||||
|
@ -56,7 +56,7 @@ After building, add following lines into your `pom.xml` if you are using Maven
|
|||
<dependency>
|
||||
<groupId>org.tikv</groupId>
|
||||
<artifactId>tikv-client-java</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.1.0</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -66,6 +66,7 @@ After building, add following lines into your `pom.xml` if you are using Maven
|
|||
### Create a RawKVClient
|
||||
|
||||
```java
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.TiSession;
|
||||
import org.tikv.raw.RawKVClient;
|
||||
|
||||
|
@ -74,109 +75,100 @@ public class Main {
|
|||
// You MUST create a raw configuration if you are using RawKVClient.
|
||||
TiConfiguration conf = TiConfiguration.createRawDefault(YOUR_PD_ADDRESSES);
|
||||
TiSession session = TiSession.create(conf);
|
||||
RawKVClient = session.createRawKVClient();
|
||||
RawKVClient client = session.createRawClient();
|
||||
}
|
||||
}
|
||||
```
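
Once the client is created, raw operations take protobuf `ByteString` keys and values. Below is a minimal usage sketch built only from the `put`/`get`/`delete` signatures documented in this README; the PD address is an example, and the try-with-resources form assumes `TiSession` and `RawKVClient` are `AutoCloseable` in the release you build against.

```java
import com.google.protobuf.ByteString;
import org.tikv.common.TiConfiguration;
import org.tikv.common.TiSession;
import org.tikv.raw.RawKVClient;

public class QuickStart {
  public static void main(String[] args) throws Exception {
    // Example PD address; replace with your own cluster's PD endpoints.
    TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379");
    try (TiSession session = TiSession.create(conf);
         RawKVClient client = session.createRawClient()) {
      ByteString key = ByteString.copyFromUtf8("k1");
      client.put(key, ByteString.copyFromUtf8("v1")); // write a raw key-value pair
      ByteString value = client.get(key);             // ByteString.EMPTY if the key is absent
      System.out.println(value.toStringUtf8());
      client.delete(key);                             // remove the pair
    }
  }
}
```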
|
||||
|
||||
### API
|
||||
## Java Client Configuration Parameter
|
||||
|
||||
```java
|
||||
/**
|
||||
* Put a raw key-value pair to TiKV
|
||||
*
|
||||
* @param key raw key
|
||||
* @param value raw value
|
||||
*/
|
||||
void put(ByteString key, ByteString value)
|
||||
### JVM Parameter
|
||||
|
||||
The following are JVM-related parameters.
|
||||
|
||||
#### tikv.pd.addresses
|
||||
- pd addresses, separated by comma
|
||||
- default: 127.0.0.1:2379
|
||||
|
||||
#### tikv.grpc.timeout_in_ms
|
||||
- timeout of grpc request
|
||||
- default: 600ms
|
||||
|
||||
#### tikv.grpc.scan_timeout_in_ms
|
||||
- timeout of scan/delete range grpc request
|
||||
- default: 20s
|
||||
|
||||
### Metrics Parameter
|
||||
|
||||
#### tikv.metrics.enable
|
||||
- whether to enable metrics exporting
|
||||
- default: false
|
||||
|
||||
#### tikv.metrics.port
|
||||
- the metrics exporting http port
|
||||
- default: 3140
|
||||
|
||||
### ThreadPool Parameter
|
||||
|
||||
The following are ThreadPool-related parameters, which can be passed in as JVM parameters (see the configuration sketch after this list).
|
||||
|
||||
#### tikv.batch_get_concurrency
|
||||
- the thread pool size of batchGet on client side
|
||||
- default: 20
|
||||
|
||||
#### tikv.batch_put_concurrency
|
||||
- the thread pool size of batchPut on client side
|
||||
- default: 20
|
||||
|
||||
#### tikv.batch_delete_concurrency
|
||||
- the thread pool size of batchDelete on client side
|
||||
- default: 20
|
||||
|
||||
#### tikv.batch_scan_concurrency
|
||||
- the thread pool size of batchScan on client side
|
||||
- default: 5
|
||||
|
||||
#### tikv.delete_range_concurrency
|
||||
- the thread pool size of deleteRange on client side
|
||||
- default: 20
|
||||
|
||||
#### tikv.rawkv.default_backoff_in_ms
|
||||
- RawKV default backoff in milliseconds
|
||||
- default: 20000 (20 seconds)
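
The keys above are plain string properties, so one way to set them from application code is through JVM system properties before the configuration and session are created. This is only a sketch: whether a given release reads these keys from `System` properties (as opposed to `-D` command-line flags or a properties file) is an assumption here.

```java
public class ThreadPoolTuning {
  public static void main(String[] args) {
    // Assumption: the client resolves these keys from JVM system properties,
    // equivalent to passing -Dtikv.batch_put_concurrency=40 on the command line.
    System.setProperty("tikv.batch_put_concurrency", "40");
    System.setProperty("tikv.batch_get_concurrency", "40");
    System.setProperty("tikv.rawkv.default_backoff_in_ms", "10000");
    // ... then build TiConfiguration / TiSession as shown earlier in this README.
  }
}
```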
|
||||
|
||||
## Metrics
|
||||
|
||||
Client Java supports exporting metrics to Prometheus in poll mode and viewing them in Grafana. The following steps show how to enable this feature.
|
||||
|
||||
### Step 1: Enable metrics exporting
|
||||
|
||||
- set the config `tikv.metrics.enable` to `true`
|
||||
- call `TiConfiguration.setMetricsEnable(true)`
|
||||
|
||||
### Step 2: Set the metrics port
|
||||
|
||||
- set the config `tikv.metrics.port`
|
||||
- call `TiConfiguration.setMetricsPort`
|
||||
|
||||
The default port is 3140.
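
Steps 1 and 2 can also be done programmatically when building the configuration. A minimal sketch, assuming the `setMetricsEnable`/`setMetricsPort` setters mentioned above are instance methods on `TiConfiguration`:

```java
import org.tikv.common.TiConfiguration;
import org.tikv.common.TiSession;

public class MetricsSetup {
  public static void main(String[] args) throws Exception {
    TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379");
    conf.setMetricsEnable(true); // step 1: enable metrics exporting
    conf.setMetricsPort(3140);   // step 2: metrics HTTP port (3140 is the default)
    TiSession session = TiSession.create(conf);
    // ... use the session; Prometheus can now scrape 127.0.0.1:3140
    session.close();
  }
}
```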
|
||||
|
||||
### Step 3: Config Prometheus
|
||||
|
||||
Add the following config to `conf/prometheus.yml` and restart Prometheus.
|
||||
|
||||
```yaml
|
||||
- job_name: "tikv-client"
|
||||
honor_labels: true
|
||||
static_configs:
|
||||
- targets:
|
||||
- '127.0.0.1:3140'
|
||||
- '127.0.0.2:3140'
|
||||
- '127.0.0.3:3140'
|
||||
```
|
||||
|
||||
```java
|
||||
/**
|
||||
* Get a raw key-value pair from TiKV if key exists
|
||||
*
|
||||
* @param key raw key
|
||||
* @return a ByteString value if key exists, ByteString.EMPTY if key does not exist
|
||||
*/
|
||||
ByteString get(ByteString key)
|
||||
```
|
||||
|
||||
```java
|
||||
/**
|
||||
* Scan raw key-value pairs from TiKV in range [startKey, endKey)
|
||||
*
|
||||
* @param startKey raw start key, inclusive
|
||||
* @param endKey raw end key, exclusive
|
||||
* @param limit limit of key-value pairs scanned, should be less than {@link #MAX_RAW_SCAN_LIMIT}
|
||||
* @return list of key-value pairs in range
|
||||
*/
|
||||
List<Kvrpcpb.KvPair> scan(ByteString startKey, ByteString endKey, int limit)
|
||||
```
|
||||
|
||||
```java
|
||||
/**
|
||||
* Scan raw key-value pairs from TiKV in range [startKey, endKey)
|
||||
*
|
||||
* @param startKey raw start key, inclusive
|
||||
* @param limit limit of key-value pairs scanned, should be less than {@link #MAX_RAW_SCAN_LIMIT}
|
||||
* @return list of key-value pairs in range
|
||||
*/
|
||||
List<Kvrpcpb.KvPair> scan(ByteString startKey, int limit)
|
||||
```
|
||||
|
||||
```java
|
||||
/**
|
||||
* Delete a raw key-value pair from TiKV if key exists
|
||||
*
|
||||
* @param key raw key to be deleted
|
||||
*/
|
||||
void delete(ByteString key)
|
||||
```
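
A short sketch of iterating the scan API above. `Kvrpcpb.KvPair` comes from `org.tikv.kvproto`, and its `getKey()`/`getValue()` accessors are assumed to be the generated protobuf getters.

```java
import com.google.protobuf.ByteString;
import java.util.List;
import org.tikv.kvproto.Kvrpcpb;
import org.tikv.raw.RawKVClient;

public class ScanExample {
  // Prints up to `limit` key-value pairs in the range [start, end).
  static void dumpRange(RawKVClient client, ByteString start, ByteString end, int limit) {
    List<Kvrpcpb.KvPair> pairs = client.scan(start, end, limit);
    for (Kvrpcpb.KvPair pair : pairs) {
      System.out.println(pair.getKey().toStringUtf8() + " = " + pair.getValue().toStringUtf8());
    }
  }
}
```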
|
||||
|
||||
## Java Client Configuration Parameters

This section describes the configuration parameters related to deploying and using the Java Client.

### Common JVM Parameters

The following are the commonly used JVM-related parameters.

#### tikv.pd.addresses
- addresses of the PD cluster, comma-separated
- default: 127.0.0.1:2379

#### tikv.grpc.timeout_in_ms
- timeout of gRPC requests
- default: 600ms

#### tikv.grpc.scan_timeout_in_ms
- timeout of scan/delete range gRPC requests
- default: 20s

### ThreadPool JVM Parameters

The following are the ThreadPool-related parameters and their defaults, which can be passed in as JVM parameters.

#### tikv.batch_get_concurrency
- the thread pool size of batchGet on the client side
- default: 20

#### tikv.batch_put_concurrency
- the thread pool size of batchPut on the client side
- default: 20

#### tikv.batch_delete_concurrency
- the thread pool size of batchDelete on the client side
- default: 20

#### tikv.batch_scan_concurrency
- the thread pool size of batchScan on the client side
- default: 5

#### tikv.delete_range_concurrency
- the thread pool size of deleteRange on the client side
- default: 20
|
||||
### Step 4: Config Grafana
|
||||
|
||||
Import the [Client-Java-Summary dashboard config](/metrics/grafana/client_java_summary.json) to Grafana.
|
||||
|
||||
## License
|
||||
Apache 2.0 license. See the [LICENSE](./LICENSE) file for details.
|
|
@ -0,0 +1,4 @@
|
|||
# PD Configuration.
|
||||
[replication]
|
||||
enable-placement-rules = true
|
||||
max-replicas = 1
|
|
@ -0,0 +1 @@
|
|||
# TiDB Configuration.
|
|
@ -0,0 +1,5 @@
|
|||
# TiKV Configuration.
|
||||
|
||||
[raftstore]
|
||||
# Set the store capacity; if not set, the disk capacity is used.
|
||||
capacity = "8G"
|
|
@ -0,0 +1,439 @@
|
|||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_TEST-CLUSTER",
|
||||
"label": "test-cluster",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "prometheus",
|
||||
"pluginName": "Prometheus"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "6.1.6"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "prometheus",
|
||||
"name": "Prometheus",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_TEST-CLUSTER}",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": true,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(client_java_raw_requests_latency_count[1m])) by (type)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{type}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Client QPS",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_TEST-CLUSTER}",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 4,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": true,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(client_java_raw_requests_latency_sum[1m])) by (type) / sum(rate(client_java_raw_requests_latency_count[1m])) by (type)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{type}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Client Avg Latency",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "s",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_TEST-CLUSTER}",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"id": 8,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": true,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(client_java_raw_requests_failure_total[1m])) by (type)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{type}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Client Failures",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_TEST-CLUSTER}",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 9
|
||||
},
|
||||
"id": 7,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": true,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(client_java_grpc_raw_requests_latency_sum[1m])) by (type) / sum(rate(client_java_grpc_raw_requests_latency_count[1m])) by (type)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{type}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Client gRPC Avg Latency",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "s",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 18,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "browser",
|
||||
"title": "Client-Java-Summary",
|
||||
"uid": "000000911",
|
||||
"version": 1
|
||||
}
|
9
pom.xml
|
@ -5,7 +5,7 @@
|
|||
|
||||
<groupId>org.tikv</groupId>
|
||||
<artifactId>tikv-client-java</artifactId>
|
||||
<version>3.0.2-SNAPSHOT</version>
|
||||
<version>3.1.2</version>
|
||||
<packaging>jar</packaging>
|
||||
<name>TiKV Java Client</name>
|
||||
<description>A Java Client for TiKV</description>
|
||||
|
@ -126,6 +126,11 @@
|
|||
<artifactId>grpc-stub</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-services</artifactId>
|
||||
<version>${grpc.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.grpc</groupId>
|
||||
<artifactId>grpc-testing</artifactId>
|
||||
|
@ -180,7 +185,7 @@
|
|||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-lang3</artifactId>
|
||||
<version>3.9</version>
|
||||
<version>3.10</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
|
|
|
@ -1,34 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
java_library(
|
||||
name = "tikv-java-client-lib",
|
||||
srcs = glob(
|
||||
["**/*.java"],
|
||||
),
|
||||
deps = [
|
||||
"//:java",
|
||||
"@com_fasterxml_jackson_core_jackson_annotations//jar",
|
||||
"@com_fasterxml_jackson_core_jackson_core//jar",
|
||||
"@com_fasterxml_jackson_core_jackson_databind//jar",
|
||||
"@com_google_code_findbugs_jsr305//jar",
|
||||
"@com_google_code_gson_gson//jar",
|
||||
"@com_google_errorprone_error_prone_annotations//jar",
|
||||
"@com_google_guava_guava//jar",
|
||||
"@com_google_protobuf_protobuf_java//jar",
|
||||
"@joda_time//jar",
|
||||
# the following are defined in rules_protobuf
|
||||
"@org_pubref_rules_protobuf//java:grpc_compiletime_deps",
|
||||
"@org_pubref_rules_protobuf//java:netty_runtime_deps",
|
||||
|
||||
"@org_slf4j_slf4j_api//jar",
|
||||
"@org_slf4j_jcl_over_slf4j//jar",
|
||||
"@org_slf4j_jul_to_slf4j//jar",
|
||||
"@log4j_log4j//jar",
|
||||
"@net_sf_trove4j_trove4j//jar",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "srcs",
|
||||
srcs = ["BUILD"] + glob(["**/*.java"]),
|
||||
)
|
|
@ -1,5 +0,0 @@
|
|||
package org.tikv;
|
||||
|
||||
public class Main {
|
||||
public static void main(String args[]) throws Exception {}
|
||||
}
|
|
@ -18,10 +18,15 @@ package org.tikv.common;
|
|||
import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall;
|
||||
import static io.grpc.stub.ClientCalls.blockingServerStreamingCall;
|
||||
|
||||
import io.grpc.ManagedChannel;
|
||||
import io.grpc.MethodDescriptor;
|
||||
import io.grpc.health.v1.HealthCheckRequest;
|
||||
import io.grpc.health.v1.HealthCheckResponse;
|
||||
import io.grpc.health.v1.HealthGrpc;
|
||||
import io.grpc.stub.AbstractStub;
|
||||
import io.grpc.stub.ClientCalls;
|
||||
import io.grpc.stub.StreamObserver;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Supplier;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -171,4 +176,20 @@ public abstract class AbstractGRPCClient<
|
|||
protected abstract BlockingStubT getBlockingStub();
|
||||
|
||||
protected abstract StubT getAsyncStub();
|
||||
|
||||
protected boolean checkHealth(String addressStr, HostMapping hostMapping) {
|
||||
ManagedChannel channel = channelFactory.getChannel(addressStr, hostMapping);
|
||||
HealthGrpc.HealthBlockingStub stub =
|
||||
HealthGrpc.newBlockingStub(channel).withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
|
||||
HealthCheckRequest req = HealthCheckRequest.newBuilder().build();
|
||||
try {
|
||||
HealthCheckResponse resp = stub.check(req);
|
||||
if (resp.getStatus() != HealthCheckResponse.ServingStatus.SERVING) {
|
||||
return false;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,11 +15,13 @@
|
|||
|
||||
package org.tikv.common;
|
||||
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
|
||||
public class ConfigUtils {
|
||||
public static final String TIKV_PD_ADDRESSES = "tikv.pd.addresses";
|
||||
public static final String TIKV_GRPC_TIMEOUT = "tikv.grpc.timeout_in_ms";
|
||||
public static final String TIKV_GRPC_FORWARD_TIMEOUT = "tikv.grpc.forward_timeout_in_ms";
|
||||
public static final String TIKV_GRPC_SCAN_TIMEOUT = "tikv.grpc.scan_timeout_in_ms";
|
||||
public static final String TIKV_GRPC_SCAN_BATCH_SIZE = "tikv.grpc.scan_batch_size";
|
||||
public static final String TIKV_GRPC_MAX_FRAME_SIZE = "tikv.grpc.max_frame_size";
|
||||
|
@ -42,16 +44,25 @@ public class ConfigUtils {
|
|||
public static final String TIKV_KV_CLIENT_CONCURRENCY = "tikv.kv_client_concurrency";
|
||||
|
||||
public static final String TIKV_KV_MODE = "tikv.kv_mode";
|
||||
public static final String TIKV_IS_REPLICA_READ = "tikv.is_replica_read";
|
||||
public static final String TIKV_REPLICA_READ = "tikv.replica_read";
|
||||
|
||||
public static final String TIKV_METRICS_ENABLE = "tikv.metrics.enable";
|
||||
public static final String TIKV_METRICS_PORT = "tikv.metrics.port";
|
||||
|
||||
public static final String TIKV_NETWORK_MAPPING_NAME = "tikv.network.mapping";
|
||||
public static final String TIKV_ENABLE_GRPC_FORWARD = "tikv.enable_grpc_forward";
|
||||
public static final String TIKV_GRPC_HEALTH_CHECK_TIMEOUT = "tikv.grpc.health_check_timeout";
|
||||
public static final String TIKV_HEALTH_CHECK_PERIOD_DURATION =
|
||||
"tikv.health_check_period_duration";
|
||||
|
||||
public static final String TIKV_RAWKV_DEFAULT_BACKOFF_IN_MS = "tikv.rawkv.default_backoff_in_ms";
|
||||
|
||||
public static final String DEF_PD_ADDRESSES = "127.0.0.1:2379";
|
||||
public static final String DEF_TIMEOUT = "600ms";
|
||||
public static final String DEF_TIMEOUT = "200ms";
|
||||
public static final String DEF_FORWARD_TIMEOUT = "300ms";
|
||||
public static final String DEF_SCAN_TIMEOUT = "20s";
|
||||
public static final int DEF_CHECK_HEALTH_TIMEOUT = 100;
|
||||
public static final int DEF_HEALTH_CHECK_PERIOD_DURATION = 300;
|
||||
public static final int DEF_SCAN_BATCH_SIZE = 10240;
|
||||
public static final int DEF_MAX_FRAME_SIZE = 268435456 * 2; // 512 MB (2 * 256 MB)
|
||||
public static final int DEF_INDEX_SCAN_BATCH_SIZE = 20000;
|
||||
|
@ -72,10 +83,13 @@ public class ConfigUtils {
|
|||
public static final String DEF_DB_PREFIX = "";
|
||||
public static final int DEF_KV_CLIENT_CONCURRENCY = 10;
|
||||
public static final TiConfiguration.KVMode DEF_KV_MODE = TiConfiguration.KVMode.TXN;
|
||||
public static final boolean DEF_IS_REPLICA_READ = false;
|
||||
public static final String DEF_REPLICA_READ = "LEADER";
|
||||
public static final boolean DEF_METRICS_ENABLE = false;
|
||||
public static final int DEF_METRICS_PORT = 3140;
|
||||
public static final String DEF_TIKV_NETWORK_MAPPING_NAME = "";
|
||||
public static final boolean DEF_GRPC_FORWARD_ENABLE = true;
|
||||
|
||||
public static final int DEF_TIKV_RAWKV_DEFAULT_BACKOFF_IN_MS = BackOffer.RAWKV_MAX_BACKOFF;
|
||||
|
||||
public static final String NORMAL_COMMAND_PRIORITY = "NORMAL";
|
||||
public static final String LOW_COMMAND_PRIORITY = "LOW";
|
||||
|
@ -86,4 +100,8 @@ public class ConfigUtils {
|
|||
|
||||
public static final String RAW_KV_MODE = "RAW";
|
||||
public static final String TXN_KV_MODE = "TXN";
|
||||
|
||||
public static final String LEADER = "LEADER";
|
||||
public static final String FOLLOWER = "FOLLOWER";
|
||||
public static final String LEADER_AND_FOLLOWER = "LEADER_AND_FOLLOWER";
|
||||
}
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
/*
|
||||
* Copyright 2021 PingCAP, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.tikv.common;
|
||||
|
||||
import static org.tikv.common.pd.PDUtils.addrToUri;
|
||||
|
||||
import com.google.common.annotations.Beta;
|
||||
import io.etcd.jetcd.ByteSequence;
|
||||
import io.etcd.jetcd.Client;
|
||||
import io.etcd.jetcd.KeyValue;
|
||||
import io.etcd.jetcd.kv.GetResponse;
|
||||
import java.net.URI;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class DefaultHostMapping implements HostMapping {
|
||||
private static final String NETWORK_MAPPING_PATH = "/client/url-mapping";
|
||||
private final Client etcdClient;
|
||||
private final String networkMappingName;
|
||||
private final ConcurrentMap<String, String> hostMapping;
|
||||
private final Logger logger = LoggerFactory.getLogger(DefaultHostMapping.class);
|
||||
|
||||
public DefaultHostMapping(Client etcdClient, String networkMappingName) {
|
||||
this.etcdClient = etcdClient;
|
||||
this.networkMappingName = networkMappingName;
|
||||
this.hostMapping = new ConcurrentHashMap<>();
|
||||
}
|
||||
|
||||
private ByteSequence hostToNetworkMappingKey(String host) {
|
||||
String path = NETWORK_MAPPING_PATH + "/" + networkMappingName + "/" + host;
|
||||
return ByteSequence.from(path, StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
@Beta
|
||||
private String getMappedHostFromPD(String host) {
|
||||
ByteSequence hostKey = hostToNetworkMappingKey(host);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
CompletableFuture<GetResponse> future = etcdClient.getKVClient().get(hostKey);
|
||||
try {
|
||||
GetResponse resp = future.get();
|
||||
List<KeyValue> kvs = resp.getKvs();
|
||||
if (kvs.size() != 1) {
|
||||
break;
|
||||
}
|
||||
return kvs.get(0).getValue().toString(StandardCharsets.UTF_8);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
} catch (ExecutionException e) {
|
||||
logger.info("failed to get mapped Host from PD: " + host, e);
|
||||
break;
|
||||
} catch (Exception ignore) {
|
||||
// ignore
|
||||
break;
|
||||
}
|
||||
}
|
||||
return host;
|
||||
}
|
||||
|
||||
public URI getMappedURI(URI uri) {
|
||||
if (networkMappingName.isEmpty()) {
|
||||
return uri;
|
||||
}
|
||||
return addrToUri(
|
||||
hostMapping.computeIfAbsent(uri.getHost(), this::getMappedHostFromPD)
|
||||
+ ":"
|
||||
+ uri.getPort());
|
||||
}
|
||||
}
|
|
@ -15,73 +15,9 @@
|
|||
|
||||
package org.tikv.common;
|
||||
|
||||
import static org.tikv.common.pd.PDUtils.addrToUri;
|
||||
|
||||
import com.google.common.annotations.Beta;
|
||||
import io.etcd.jetcd.ByteSequence;
|
||||
import io.etcd.jetcd.Client;
|
||||
import io.etcd.jetcd.KeyValue;
|
||||
import io.etcd.jetcd.kv.GetResponse;
|
||||
import java.io.Serializable;
|
||||
import java.net.URI;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class HostMapping {
|
||||
private static final String NETWORK_MAPPING_PATH = "/client/url-mapping";
|
||||
private final Client etcdClient;
|
||||
private final String networkMappingName;
|
||||
private final ConcurrentMap<String, String> hostMapping;
|
||||
private final Logger logger = LoggerFactory.getLogger(HostMapping.class);
|
||||
|
||||
public HostMapping(Client etcdClient, String networkMappingName) {
|
||||
this.etcdClient = etcdClient;
|
||||
this.networkMappingName = networkMappingName;
|
||||
this.hostMapping = new ConcurrentHashMap<>();
|
||||
}
|
||||
|
||||
private ByteSequence hostToNetworkMappingKey(String host) {
|
||||
String path = NETWORK_MAPPING_PATH + "/" + networkMappingName + "/" + host;
|
||||
return ByteSequence.from(path, StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
@Beta
|
||||
private String getMappedHostFromPD(String host) {
|
||||
ByteSequence hostKey = hostToNetworkMappingKey(host);
|
||||
for (int i = 0; i < 5; i++) {
|
||||
CompletableFuture<GetResponse> future = etcdClient.getKVClient().get(hostKey);
|
||||
try {
|
||||
GetResponse resp = future.get();
|
||||
List<KeyValue> kvs = resp.getKvs();
|
||||
if (kvs.size() != 1) {
|
||||
break;
|
||||
}
|
||||
return kvs.get(0).getValue().toString(StandardCharsets.UTF_8);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
} catch (ExecutionException e) {
|
||||
logger.info("failed to get mapped Host from PD: " + host, e);
|
||||
break;
|
||||
} catch (Exception ignore) {
|
||||
// ignore
|
||||
break;
|
||||
}
|
||||
}
|
||||
return host;
|
||||
}
|
||||
|
||||
public URI getMappedURI(URI uri) {
|
||||
if (networkMappingName.isEmpty()) {
|
||||
return uri;
|
||||
}
|
||||
return addrToUri(
|
||||
hostMapping.computeIfAbsent(uri.getHost(), this::getMappedHostFromPD)
|
||||
+ ":"
|
||||
+ uri.getPort());
|
||||
}
|
||||
public interface HostMapping extends Serializable {
|
||||
URI getMappedURI(URI uri);
|
||||
}
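
With `HostMapping` reduced to a one-method `Serializable` interface, callers can plug in their own address translation; `PDClient.initCluster` below falls back to `DefaultHostMapping` only when `TiConfiguration.getHostMapping()` returns null. A minimal sketch of a custom implementation; the host names are hypothetical and the rewrite rule is only illustrative.

```java
import java.net.URI;
import org.tikv.common.HostMapping;

// Hypothetical mapping that rewrites an internal PD/TiKV host to a publicly reachable one.
public class StaticHostMapping implements HostMapping {
  @Override
  public URI getMappedURI(URI uri) {
    if (!"pd-internal.local".equals(uri.getHost())) {
      return uri; // pass through anything we do not recognize
    }
    String scheme = uri.getScheme() == null ? "" : uri.getScheme() + "://";
    return URI.create(scheme + "pd-public.example.com:" + uri.getPort());
  }
}
```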
|
||||
|
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* Copyright 2021 PingCAP, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.tikv.common;
|
||||
|
||||
import io.prometheus.client.CollectorRegistry;
|
||||
import io.prometheus.client.exporter.HTTPServer;
|
||||
import java.net.InetSocketAddress;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.policy.RetryPolicy;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.raw.RawKVClient;
|
||||
|
||||
public class MetricsServer {
|
||||
private static final Logger logger = LoggerFactory.getLogger(MetricsServer.class);
|
||||
|
||||
private static MetricsServer METRICS_SERVER_INSTANCE = null;
|
||||
private static int metricsServerRefCount = 0;
|
||||
|
||||
private int port;
|
||||
private HTTPServer server;
|
||||
private CollectorRegistry collectorRegistry;
|
||||
|
||||
public static MetricsServer getInstance(TiConfiguration conf) {
|
||||
if (!conf.isMetricsEnable()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
synchronized (MetricsServer.class) {
|
||||
int port = conf.getMetricsPort();
|
||||
if (METRICS_SERVER_INSTANCE != null) {
|
||||
if (port != METRICS_SERVER_INSTANCE.port) {
|
||||
throw new IllegalArgumentException(
|
||||
String.format(
|
||||
"Do dot support multiple tikv.metrics.port, which are %d and %d",
|
||||
port, METRICS_SERVER_INSTANCE.port));
|
||||
}
|
||||
} else {
|
||||
METRICS_SERVER_INSTANCE = new MetricsServer(port);
|
||||
}
|
||||
metricsServerRefCount += 1;
|
||||
return METRICS_SERVER_INSTANCE;
|
||||
}
|
||||
}
|
||||
|
||||
private MetricsServer(int port) {
|
||||
try {
|
||||
this.collectorRegistry = new CollectorRegistry();
|
||||
this.collectorRegistry.register(RawKVClient.RAW_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(RawKVClient.RAW_REQUEST_FAILURE);
|
||||
this.collectorRegistry.register(RawKVClient.RAW_REQUEST_SUCCESS);
|
||||
this.collectorRegistry.register(RegionStoreClient.GRPC_RAW_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(RetryPolicy.GRPC_SINGLE_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(RegionManager.GET_REGION_BY_KEY_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(PDClient.PD_GET_REGION_BY_KEY_REQUEST_LATENCY);
|
||||
this.port = port;
|
||||
this.server = new HTTPServer(new InetSocketAddress(port), this.collectorRegistry, true);
|
||||
logger.info("http server is up " + this.server.getPort());
|
||||
} catch (Exception e) {
|
||||
logger.error("http server not up");
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public void close() {
|
||||
synchronized (MetricsServer.class) {
|
||||
if (metricsServerRefCount == 1) {
|
||||
if (server != null) {
|
||||
server.stop();
|
||||
logger.info("Metrics server on " + server.getPort() + " is stopped");
|
||||
}
|
||||
METRICS_SERVER_INSTANCE = null;
|
||||
}
|
||||
|
||||
if (metricsServerRefCount >= 1) {
|
||||
metricsServerRefCount -= 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
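
The metrics server above is a reference-counted singleton keyed by `tikv.metrics.port`. A brief lifecycle sketch; whether `TiSession` already manages this internally is not shown in this diff, and the configuration setters are the ones referenced in the README, so treat the explicit calls as illustrative.

```java
import org.tikv.common.MetricsServer;
import org.tikv.common.TiConfiguration;

public class MetricsLifecycle {
  public static void main(String[] args) {
    TiConfiguration conf = TiConfiguration.createRawDefault("127.0.0.1:2379");
    conf.setMetricsEnable(true);
    conf.setMetricsPort(3140);

    // Returns null when tikv.metrics.enable is false; otherwise a ref-counted singleton.
    MetricsServer metricsServer = MetricsServer.getInstance(conf);

    // ... run the workload ...

    if (metricsServer != null) {
      metricsServer.close(); // decrements the ref count; the server stops when the last holder closes
    }
  }
}
```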
|
|
@ -30,23 +30,27 @@ import io.etcd.jetcd.KeyValue;
|
|||
import io.etcd.jetcd.kv.GetResponse;
|
||||
import io.etcd.jetcd.options.GetOption;
|
||||
import io.grpc.ManagedChannel;
|
||||
import io.grpc.Metadata;
|
||||
import io.grpc.stub.MetadataUtils;
|
||||
import io.prometheus.client.Histogram;
|
||||
import java.net.URI;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.TiConfiguration.KVMode;
|
||||
import org.tikv.common.codec.Codec.BytesCodec;
|
||||
import org.tikv.common.codec.CodecDataInput;
|
||||
import org.tikv.common.codec.CodecDataOutput;
|
||||
import org.tikv.common.codec.KeyUtils;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
|
@ -54,16 +58,17 @@ import org.tikv.common.exception.TiClientInternalException;
|
|||
import org.tikv.common.meta.TiTimestamp;
|
||||
import org.tikv.common.operation.NoopHandler;
|
||||
import org.tikv.common.operation.PDErrorHandler;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.BackOffFunction.BackOffFuncType;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.common.util.FutureObserver;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
import org.tikv.kvproto.PDGrpc;
|
||||
import org.tikv.kvproto.PDGrpc.PDBlockingStub;
|
||||
import org.tikv.kvproto.PDGrpc.PDStub;
|
||||
import org.tikv.kvproto.Pdpb;
|
||||
import org.tikv.kvproto.Pdpb.Error;
|
||||
import org.tikv.kvproto.Pdpb.ErrorType;
|
||||
import org.tikv.kvproto.Pdpb.GetAllStoresRequest;
|
||||
|
@ -88,16 +93,18 @@ import org.tikv.kvproto.Pdpb.TsoResponse;
|
|||
public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
||||
implements ReadOnlyPDClient {
|
||||
private static final String TIFLASH_TABLE_SYNC_PROGRESS_PATH = "/tiflash/table/sync";
|
||||
private static final long MIN_TRY_UPDATE_DURATION = 50;
|
||||
private final Logger logger = LoggerFactory.getLogger(PDClient.class);
|
||||
private RequestHeader header;
|
||||
private TsoRequest tsoReq;
|
||||
private volatile LeaderWrapper leaderWrapper;
|
||||
private volatile PDClientWrapper pdClientWrapper;
|
||||
private ScheduledExecutorService service;
|
||||
private ScheduledExecutorService tiflashReplicaService;
|
||||
private List<URI> pdAddrs;
|
||||
private Client etcdClient;
|
||||
private ConcurrentMap<Long, Double> tiflashReplicaMap;
|
||||
private HostMapping hostMapping;
|
||||
private long lastUpdateLeaderTime;
|
||||
|
||||
public static final Histogram PD_GET_REGION_BY_KEY_REQUEST_LATENCY =
|
||||
Histogram.build()
|
||||
|
@ -143,7 +150,7 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
*
|
||||
* @param region represents a region info
|
||||
*/
|
||||
void scatterRegion(TiRegion region, BackOffer backOffer) {
|
||||
void scatterRegion(Metapb.Region region, BackOffer backOffer) {
|
||||
Supplier<ScatterRegionRequest> request =
|
||||
() ->
|
||||
ScatterRegionRequest.newBuilder().setHeader(header).setRegionId(region.getId()).build();
|
||||
|
@ -167,7 +174,7 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
*
|
||||
* @param region
|
||||
*/
|
||||
void waitScatterRegionFinish(TiRegion region, BackOffer backOffer) {
|
||||
void waitScatterRegionFinish(Metapb.Region region, BackOffer backOffer) {
|
||||
for (; ; ) {
|
||||
GetOperatorResponse resp = getOperator(region.getId());
|
||||
if (resp != null) {
|
||||
|
@ -220,7 +227,7 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
}
|
||||
|
||||
@Override
|
||||
public TiRegion getRegionByKey(BackOffer backOffer, ByteString key) {
|
||||
public Pair<Metapb.Region, Metapb.Peer> getRegionByKey(BackOffer backOffer, ByteString key) {
|
||||
Histogram.Timer requestTimer = PD_GET_REGION_BY_KEY_REQUEST_LATENCY.startTimer();
|
||||
try {
|
||||
if (conf.getKvMode() == KVMode.TXN) {
|
||||
|
@ -238,42 +245,14 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
|
||||
GetRegionResponse resp =
|
||||
callWithRetry(backOffer, PDGrpc.getGetRegionMethod(), request, handler);
|
||||
return new TiRegion(
|
||||
resp.getRegion(),
|
||||
resp.getLeader(),
|
||||
conf.getIsolationLevel(),
|
||||
conf.getCommandPriority(),
|
||||
conf.getKvMode(),
|
||||
conf.isReplicaRead());
|
||||
return new Pair<Metapb.Region, Metapb.Peer>(decodeRegion(resp.getRegion()), resp.getLeader());
|
||||
} finally {
|
||||
requestTimer.observeDuration();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Future<TiRegion> getRegionByKeyAsync(BackOffer backOffer, ByteString key) {
|
||||
FutureObserver<TiRegion, GetRegionResponse> responseObserver =
|
||||
new FutureObserver<>(
|
||||
resp ->
|
||||
new TiRegion(
|
||||
resp.getRegion(),
|
||||
resp.getLeader(),
|
||||
conf.getIsolationLevel(),
|
||||
conf.getCommandPriority(),
|
||||
conf.getKvMode(),
|
||||
conf.isReplicaRead()));
|
||||
Supplier<GetRegionRequest> request =
|
||||
() -> GetRegionRequest.newBuilder().setHeader(header).setRegionKey(key).build();
|
||||
|
||||
PDErrorHandler<GetRegionResponse> handler =
|
||||
new PDErrorHandler<>(getRegionResponseErrorExtractor, this);
|
||||
|
||||
callAsyncWithRetry(backOffer, PDGrpc.getGetRegionMethod(), request, responseObserver, handler);
|
||||
return responseObserver.getFuture();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TiRegion getRegionByID(BackOffer backOffer, long id) {
|
||||
public Pair<Metapb.Region, Metapb.Peer> getRegionByID(BackOffer backOffer, long id) {
|
||||
Supplier<GetRegionByIDRequest> request =
|
||||
() -> GetRegionByIDRequest.newBuilder().setHeader(header).setRegionId(id).build();
|
||||
PDErrorHandler<GetRegionResponse> handler =
|
||||
|
@ -281,37 +260,7 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
|
||||
GetRegionResponse resp =
|
||||
callWithRetry(backOffer, PDGrpc.getGetRegionByIDMethod(), request, handler);
|
||||
// Instead of using default leader instance, explicitly set no leader to null
|
||||
return new TiRegion(
|
||||
resp.getRegion(),
|
||||
resp.getLeader(),
|
||||
conf.getIsolationLevel(),
|
||||
conf.getCommandPriority(),
|
||||
conf.getKvMode(),
|
||||
conf.isReplicaRead());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Future<TiRegion> getRegionByIDAsync(BackOffer backOffer, long id) {
|
||||
FutureObserver<TiRegion, GetRegionResponse> responseObserver =
|
||||
new FutureObserver<>(
|
||||
resp ->
|
||||
new TiRegion(
|
||||
resp.getRegion(),
|
||||
resp.getLeader(),
|
||||
conf.getIsolationLevel(),
|
||||
conf.getCommandPriority(),
|
||||
conf.getKvMode(),
|
||||
conf.isReplicaRead()));
|
||||
|
||||
Supplier<GetRegionByIDRequest> request =
|
||||
() -> GetRegionByIDRequest.newBuilder().setHeader(header).setRegionId(id).build();
|
||||
PDErrorHandler<GetRegionResponse> handler =
|
||||
new PDErrorHandler<>(getRegionResponseErrorExtractor, this);
|
||||
|
||||
callAsyncWithRetry(
|
||||
backOffer, PDGrpc.getGetRegionByIDMethod(), request, responseObserver, handler);
|
||||
return responseObserver.getFuture();
|
||||
return new Pair<Metapb.Region, Metapb.Peer>(decodeRegion(resp.getRegion()), resp.getLeader());
|
||||
}
|
||||
|
||||
private Supplier<GetStoreRequest> buildGetStoreReq(long storeId) {
|
||||
|
@ -334,20 +283,6 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
.getStore();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Future<Store> getStoreAsync(BackOffer backOffer, long storeId) {
|
||||
FutureObserver<Store, GetStoreResponse> responseObserver =
|
||||
new FutureObserver<>(GetStoreResponse::getStore);
|
||||
|
||||
callAsyncWithRetry(
|
||||
backOffer,
|
||||
PDGrpc.getGetStoreMethod(),
|
||||
buildGetStoreReq(storeId),
|
||||
responseObserver,
|
||||
buildPDErrorHandler());
|
||||
return responseObserver.getFuture();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Store> getAllStores(BackOffer backOffer) {
|
||||
return callWithRetry(
|
||||
|
@ -361,8 +296,8 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean isReplicaRead() {
|
||||
return conf.isReplicaRead();
|
||||
public TiConfiguration.ReplicaRead getReplicaRead() {
|
||||
return conf.getReplicaRead();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -385,14 +320,15 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
}
|
||||
|
||||
@VisibleForTesting
|
||||
LeaderWrapper getLeaderWrapper() {
|
||||
return leaderWrapper;
|
||||
PDClientWrapper getPdClientWrapper() {
|
||||
return pdClientWrapper;
|
||||
}
|
||||
|
||||
private GetMembersResponse getMembers(URI uri) {
|
||||
try {
|
||||
ManagedChannel probChan = channelFactory.getChannel(uriToAddr(uri), hostMapping);
|
||||
PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(probChan);
|
||||
PDGrpc.PDBlockingStub stub =
|
||||
PDGrpc.newBlockingStub(probChan).withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
|
||||
GetMembersRequest request =
|
||||
GetMembersRequest.newBuilder().setHeader(RequestHeader.getDefaultInstance()).build();
|
||||
GetMembersResponse resp = stub.getMembers(request);
|
||||
|
@ -402,60 +338,154 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
}
|
||||
return resp;
|
||||
} catch (Exception e) {
|
||||
logger.warn("failed to get member from pd server.", e);
|
||||
logger.debug("failed to get member from pd server.", e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
synchronized boolean switchLeader(List<String> leaderURLs) {
|
||||
if (leaderURLs.isEmpty()) return false;
|
||||
String leaderUrlStr = leaderURLs.get(0);
|
||||
// TODO: Why not strip protocol info on server side since grpc does not need it
|
||||
if (leaderWrapper != null && leaderUrlStr.equals(leaderWrapper.getLeaderInfo())) {
|
||||
return true;
|
||||
// return whether the leader has changed to target address `leaderUrlStr`.
|
||||
synchronized boolean trySwitchLeader(String leaderUrlStr) {
|
||||
if (pdClientWrapper != null) {
|
||||
if (leaderUrlStr.equals(pdClientWrapper.getLeaderInfo())) {
|
||||
// The message to leader is not forwarded by follower.
|
||||
if (leaderUrlStr.equals(pdClientWrapper.getStoreAddress())) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// If the leader has transferred to another member, we can create another leader wrapper.
|
||||
}
|
||||
// switch leader
|
||||
return createLeaderWrapper(leaderUrlStr);
|
||||
return createLeaderClientWrapper(leaderUrlStr);
|
||||
}
|
||||
|
||||
private boolean createLeaderWrapper(String leaderUrlStr) {
|
||||
private synchronized boolean createLeaderClientWrapper(String leaderUrlStr) {
|
||||
try {
|
||||
URI newLeader = addrToUri(leaderUrlStr);
|
||||
leaderUrlStr = uriToAddr(newLeader);
|
||||
if (leaderWrapper != null && leaderUrlStr.equals(leaderWrapper.getLeaderInfo())) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// create new Leader
|
||||
ManagedChannel clientChannel = channelFactory.getChannel(leaderUrlStr, hostMapping);
|
||||
leaderWrapper =
|
||||
new LeaderWrapper(
|
||||
leaderUrlStr,
|
||||
PDGrpc.newBlockingStub(clientChannel),
|
||||
PDGrpc.newStub(clientChannel),
|
||||
System.nanoTime());
|
||||
pdClientWrapper =
|
||||
new PDClientWrapper(leaderUrlStr, leaderUrlStr, clientChannel, System.nanoTime());
|
||||
timeout = conf.getTimeout();
|
||||
} catch (IllegalArgumentException e) {
|
||||
logger.error("Error updating leader. " + leaderUrlStr, e);
|
||||
return false;
|
||||
}
|
||||
logger.info(String.format("Switched to new leader: %s", leaderWrapper));
|
||||
logger.info(String.format("Switched to new leader: %s", pdClientWrapper));
|
||||
return true;
|
||||
}
|
||||
|
||||
public void updateLeader() {
|
||||
synchronized boolean createFollowerClientWrapper(String followerUrlStr, String leaderUrls) {
|
||||
// TODO: Why not strip protocol info on server side since grpc does not need it
|
||||
|
||||
try {
|
||||
if (!checkHealth(followerUrlStr, hostMapping)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// create new Leader
|
||||
ManagedChannel channel = channelFactory.getChannel(followerUrlStr, hostMapping);
|
||||
pdClientWrapper = new PDClientWrapper(leaderUrls, followerUrlStr, channel, System.nanoTime());
|
||||
timeout = conf.getForwardTimeout();
|
||||
} catch (IllegalArgumentException e) {
|
||||
logger.error("Error updating follower. " + followerUrlStr, e);
|
||||
return false;
|
||||
}
|
||||
logger.info(String.format("Switched to new leader by follower forward: %s", pdClientWrapper));
|
||||
return true;
|
||||
}
|
||||
|
||||
public synchronized void updateLeaderOrforwardFollower() {
|
||||
if (System.currentTimeMillis() - lastUpdateLeaderTime < MIN_TRY_UPDATE_DURATION) {
|
||||
return;
|
||||
}
|
||||
for (URI url : this.pdAddrs) {
|
||||
// since resp is null, we need to update the leader's address by walking through all PD servers.
|
||||
GetMembersResponse resp = getMembers(url);
|
||||
if (resp == null) {
|
||||
continue;
|
||||
}
|
||||
if (resp.getLeader().getClientUrlsList().isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
String leaderUrlStr = resp.getLeader().getClientUrlsList().get(0);
|
||||
leaderUrlStr = uriToAddr(addrToUri(leaderUrlStr));
|
||||
|
||||
// if leader is switched, just return.
|
||||
if (switchLeader(resp.getLeader().getClientUrlsList())) {
|
||||
if (checkHealth(leaderUrlStr, hostMapping) && trySwitchLeader(leaderUrlStr)) {
|
||||
lastUpdateLeaderTime = System.currentTimeMillis();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!conf.getEnableGrpcForward()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
logger.info(String.format("can not switch to new leader, try follower forward"));
|
||||
List<Pdpb.Member> members = resp.getMembersList();
|
||||
|
||||
boolean hasReachNextMember = false;
|
||||
// If we have not used follower forward, try the first follower.
|
||||
if (pdClientWrapper != null && pdClientWrapper.getStoreAddress().equals(leaderUrlStr)) {
|
||||
hasReachNextMember = true;
|
||||
}
|
||||
|
||||
for (int i = 0; i < members.size() * 2; i++) {
|
||||
Pdpb.Member member = members.get(i % members.size());
|
||||
if (member.getMemberId() == resp.getLeader().getMemberId()) {
|
||||
continue;
|
||||
}
|
||||
String followerUrlStr = member.getClientUrlsList().get(0);
|
||||
followerUrlStr = uriToAddr(addrToUri(followerUrlStr));
|
||||
if (pdClientWrapper != null && pdClientWrapper.getStoreAddress().equals(followerUrlStr)) {
|
||||
hasReachNextMember = true;
|
||||
continue;
|
||||
}
|
||||
if (hasReachNextMember && createFollowerClientWrapper(followerUrlStr, leaderUrlStr)) {
|
||||
logger.warn(
|
||||
String.format("forward request to pd [%s] by pd [%s]", leaderUrlStr, followerUrlStr));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
lastUpdateLeaderTime = System.currentTimeMillis();
|
||||
if (pdClientWrapper == null) {
|
||||
throw new TiClientInternalException(
|
||||
"already tried all address on file, but not leader found yet.");
|
||||
}
|
||||
}
|
||||
|
||||
public void tryUpdateLeader() {
|
||||
for (URI url : this.pdAddrs) {
|
||||
// since resp is null, we need to update the leader's address by walking through all PD servers.
|
||||
GetMembersResponse resp = getMembers(url);
|
||||
if (resp == null) {
|
||||
continue;
|
||||
}
|
||||
List<URI> urls =
|
||||
resp.getMembersList()
|
||||
.stream()
|
||||
.map(mem -> addrToUri(mem.getClientUrls(0)))
|
||||
.collect(Collectors.toList());
|
||||
String leaderUrlStr = resp.getLeader().getClientUrlsList().get(0);
|
||||
leaderUrlStr = uriToAddr(addrToUri(leaderUrlStr));
|
||||
|
||||
// If leader is not change but becomes available, we can cancel follower forward.
|
||||
if (checkHealth(leaderUrlStr, hostMapping) && trySwitchLeader(leaderUrlStr)) {
|
||||
if (!urls.equals(this.pdAddrs)) {
|
||||
tryUpdateMembers(urls);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
throw new TiClientInternalException(
|
||||
"already tried all address on file, but not leader found yet.");
|
||||
lastUpdateLeaderTime = System.currentTimeMillis();
|
||||
if (pdClientWrapper == null) {
|
||||
throw new TiClientInternalException(
|
||||
"already tried all address on file, but not leader found yet.");
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void tryUpdateMembers(List<URI> members) {
|
||||
this.pdAddrs = members;
|
||||
}
|
||||
|
||||
public void updateTiFlashReplicaStatus() {
|
||||
|
@ -513,74 +543,112 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
|
||||
@Override
|
||||
protected PDBlockingStub getBlockingStub() {
|
||||
if (leaderWrapper == null) {
|
||||
if (pdClientWrapper == null) {
|
||||
throw new GrpcException("PDClient may not be initialized");
|
||||
}
|
||||
return leaderWrapper.getBlockingStub().withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
|
||||
return pdClientWrapper.getBlockingStub().withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected PDStub getAsyncStub() {
|
||||
if (leaderWrapper == null) {
|
||||
if (pdClientWrapper == null) {
|
||||
throw new GrpcException("PDClient may not be initialized");
|
||||
}
|
||||
return leaderWrapper.getAsyncStub().withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
|
||||
return pdClientWrapper.getAsyncStub().withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
|
||||
}
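Both stub getters above attach a fresh deadline immediately before each RPC. As a rough sketch of that per-call deadline pattern (the helper class, method names and the plaintext channel below are illustrative, not part of this change):

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import java.util.concurrent.TimeUnit;
    import org.tikv.kvproto.PDGrpc;

    class DeadlineSketch {
      // withDeadlineAfter returns a new stub whose deadline starts ticking immediately,
      // which is why the getters build it right before each RPC instead of caching it.
      static PDGrpc.PDBlockingStub freshStub(ManagedChannel channel, long timeoutMs) {
        return PDGrpc.newBlockingStub(channel).withDeadlineAfter(timeoutMs, TimeUnit.MILLISECONDS);
      }

      static ManagedChannel plaintextChannel(String pdAddress) {
        return ManagedChannelBuilder.forTarget(pdAddress).usePlaintext().build();
      }
    }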
|
||||
|
||||
private void initCluster() {
|
||||
GetMembersResponse resp = null;
|
||||
List<URI> pdAddrs = getConf().getPdAddrs();
|
||||
this.pdAddrs = pdAddrs;
|
||||
this.etcdClient = Client.builder().endpoints(pdAddrs).build();
|
||||
this.hostMapping = new HostMapping(this.etcdClient, conf.getNetworkMappingName());
|
||||
this.etcdClient =
|
||||
Client.builder()
|
||||
.endpoints(pdAddrs)
|
||||
.executorService(
|
||||
Executors.newCachedThreadPool(
|
||||
new ThreadFactoryBuilder()
|
||||
.setNameFormat("etcd-conn-manager-pool-%d")
|
||||
.setDaemon(true)
|
||||
.build()))
|
||||
.build();
|
||||
this.hostMapping =
|
||||
Optional.ofNullable(getConf().getHostMapping())
|
||||
.orElseGet(() -> new DefaultHostMapping(this.etcdClient, conf.getNetworkMappingName()));
|
||||
// The first request may cost too much latency
|
||||
long originTimeout = this.timeout;
|
||||
this.timeout = 2000;
|
||||
for (URI u : pdAddrs) {
|
||||
resp = getMembers(u);
|
||||
if (resp != null) {
|
||||
break;
|
||||
}
|
||||
logger.info("Could not get leader member with pd: " + u);
|
||||
}
|
||||
this.timeout = originTimeout;
|
||||
checkNotNull(resp, "Failed to init client for PD cluster.");
|
||||
long clusterId = resp.getHeader().getClusterId();
|
||||
header = RequestHeader.newBuilder().setClusterId(clusterId).build();
|
||||
tsoReq = TsoRequest.newBuilder().setHeader(header).setCount(1).build();
|
||||
this.tiflashReplicaMap = new ConcurrentHashMap<>();
|
||||
createLeaderWrapper(resp.getLeader().getClientUrls(0));
|
||||
this.pdAddrs =
|
||||
resp.getMembersList()
|
||||
.stream()
|
||||
.map(mem -> addrToUri(mem.getClientUrls(0)))
|
||||
.collect(Collectors.toList());
|
||||
logger.info("init cluster with address: " + this.pdAddrs);
|
||||
|
||||
String leaderUrlStr = resp.getLeader().getClientUrls(0);
|
||||
leaderUrlStr = uriToAddr(addrToUri(leaderUrlStr));
|
||||
createLeaderClientWrapper(leaderUrlStr);
|
||||
service =
|
||||
Executors.newSingleThreadScheduledExecutor(
|
||||
new ThreadFactoryBuilder().setDaemon(true).build());
|
||||
new ThreadFactoryBuilder()
|
||||
.setNameFormat("PDClient-update-leader-pool-%d")
|
||||
.setDaemon(true)
|
||||
.build());
|
||||
service.scheduleAtFixedRate(
|
||||
() -> {
|
||||
// Wrap this with a try catch block in case schedule update fails
|
||||
try {
|
||||
updateLeader();
|
||||
tryUpdateLeader();
|
||||
} catch (Exception e) {
|
||||
logger.warn("Update leader failed", e);
|
||||
}
|
||||
},
|
||||
1,
|
||||
1,
|
||||
TimeUnit.MINUTES);
|
||||
10,
|
||||
10,
|
||||
TimeUnit.SECONDS);
|
||||
tiflashReplicaService =
|
||||
Executors.newSingleThreadScheduledExecutor(
|
||||
new ThreadFactoryBuilder().setDaemon(true).build());
|
||||
new ThreadFactoryBuilder()
|
||||
.setNameFormat("PDClient-tiflash-replica-pool-%d")
|
||||
.setDaemon(true)
|
||||
.build());
|
||||
tiflashReplicaService.scheduleAtFixedRate(
|
||||
this::updateTiFlashReplicaStatus, 10, 10, TimeUnit.SECONDS);
|
||||
}
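The two schedulers above follow the same daemon-thread, fixed-rate pattern. A minimal standalone sketch of it, assuming nothing beyond Guava's ThreadFactoryBuilder and the JDK scheduler (names are illustrative):

    import com.google.common.util.concurrent.ThreadFactoryBuilder;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class PeriodicTaskSketch {
      static ScheduledExecutorService startUpdater(Runnable task) {
        ScheduledExecutorService service =
            Executors.newSingleThreadScheduledExecutor(
                new ThreadFactoryBuilder()
                    .setNameFormat("periodic-updater-%d")
                    .setDaemon(true) // daemon threads do not keep the JVM alive
                    .build());
        service.scheduleAtFixedRate(
            () -> {
              try {
                task.run();
              } catch (Exception e) {
                // an uncaught exception would silently cancel all later runs,
                // hence the try/catch around the body (as in initCluster above)
              }
            },
            10,
            10,
            TimeUnit.SECONDS);
        return service;
      }
    }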
|
||||
|
||||
static class LeaderWrapper {
|
||||
static class PDClientWrapper {
|
||||
private final String leaderInfo;
|
||||
private final PDBlockingStub blockingStub;
|
||||
private final PDStub asyncStub;
|
||||
private final long createTime;
|
||||
private final String storeAddress;
|
||||
|
||||
LeaderWrapper(
|
||||
String leaderInfo,
|
||||
PDGrpc.PDBlockingStub blockingStub,
|
||||
PDGrpc.PDStub asyncStub,
|
||||
long createTime) {
|
||||
PDClientWrapper(
|
||||
String leaderInfo, String storeAddress, ManagedChannel clientChannel, long createTime) {
|
||||
if (!storeAddress.equals(leaderInfo)) {
|
||||
Metadata header = new Metadata();
|
||||
header.put(TiConfiguration.PD_FORWARD_META_DATA_KEY, addrToUri(leaderInfo).toString());
|
||||
this.blockingStub =
|
||||
MetadataUtils.attachHeaders(PDGrpc.newBlockingStub(clientChannel), header);
|
||||
this.asyncStub = MetadataUtils.attachHeaders(PDGrpc.newStub(clientChannel), header);
|
||||
} else {
|
||||
this.blockingStub = PDGrpc.newBlockingStub(clientChannel);
|
||||
this.asyncStub = PDGrpc.newStub(clientChannel);
|
||||
}
|
||||
this.leaderInfo = leaderInfo;
|
||||
this.blockingStub = blockingStub;
|
||||
this.asyncStub = asyncStub;
|
||||
this.storeAddress = storeAddress;
|
||||
this.createTime = createTime;
|
||||
}
|
||||
|
||||
|
@ -588,6 +656,10 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
return leaderInfo;
|
||||
}
|
||||
|
||||
String getStoreAddress() {
|
||||
return storeAddress;
|
||||
}
|
||||
|
||||
PDBlockingStub getBlockingStub() {
|
||||
return blockingStub;
|
||||
}
|
||||
|
@ -602,7 +674,32 @@ public class PDClient extends AbstractGRPCClient<PDBlockingStub, PDStub>
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[leaderInfo: " + leaderInfo + "]";
|
||||
return "[leaderInfo: " + leaderInfo + ", storeAddress: " + storeAddress + "]";
|
||||
}
|
||||
}
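PDClientWrapper's forwarding path works by attaching the leader's address as a gRPC metadata header on a channel that actually points at a follower. A compact sketch of just that piece, reusing the pd-forwarded-host key defined in TiConfiguration in this change (helper names are illustrative):

    import io.grpc.ManagedChannel;
    import io.grpc.Metadata;
    import io.grpc.stub.MetadataUtils;
    import org.tikv.kvproto.PDGrpc;

    class ForwardingStubSketch {
      static final Metadata.Key<String> PD_FORWARD_KEY =
          Metadata.Key.of("pd-forwarded-host", Metadata.ASCII_STRING_MARSHALLER);

      // Every call on the returned stub carries the leader's address, so the follower
      // it is connected to can proxy the request to the currently unreachable leader.
      static PDGrpc.PDBlockingStub forwardingStub(ManagedChannel followerChannel, String leaderUrl) {
        Metadata header = new Metadata();
        header.put(PD_FORWARD_KEY, leaderUrl);
        return MetadataUtils.attachHeaders(PDGrpc.newBlockingStub(followerChannel), header);
      }
    }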
|
||||
|
||||
private Metapb.Region decodeRegion(Metapb.Region region) {
|
||||
final boolean isRawRegion = conf.getKvMode() == KVMode.RAW;
|
||||
Metapb.Region.Builder builder =
|
||||
Metapb.Region.newBuilder()
|
||||
.setId(region.getId())
|
||||
.setRegionEpoch(region.getRegionEpoch())
|
||||
.addAllPeers(region.getPeersList());
|
||||
|
||||
if (region.getStartKey().isEmpty() || isRawRegion) {
|
||||
builder.setStartKey(region.getStartKey());
|
||||
} else {
|
||||
byte[] decodedStartKey = BytesCodec.readBytes(new CodecDataInput(region.getStartKey()));
|
||||
builder.setStartKey(ByteString.copyFrom(decodedStartKey));
|
||||
}
|
||||
|
||||
if (region.getEndKey().isEmpty() || isRawRegion) {
|
||||
builder.setEndKey(region.getEndKey());
|
||||
} else {
|
||||
byte[] decodedEndKey = BytesCodec.readBytes(new CodecDataInput(region.getEndKey()));
|
||||
builder.setEndKey(ByteString.copyFrom(decodedEndKey));
|
||||
}
|
||||
|
||||
return builder.build();
|
||||
}
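decodeRegion only runs the codec on non-empty keys in transactional mode; empty keys mark the open boundary and raw-mode keys are stored unencoded. A small sketch of that branch on a single key, assuming the codec classes used above (their import paths are inferred, not shown in this diff):

    import com.google.protobuf.ByteString;
    import org.tikv.common.TiConfiguration.KVMode;
    import org.tikv.common.codec.Codec.BytesCodec;
    import org.tikv.common.codec.CodecDataInput;

    class RegionKeySketch {
      static ByteString decodeBoundary(ByteString key, KVMode mode) {
        // Only non-empty TXN-mode keys are memcomparable-encoded and need decoding.
        if (key.isEmpty() || mode == KVMode.RAW) {
          return key;
        }
        return ByteString.copyFrom(BytesCodec.readBytes(new CodecDataInput(key)));
      }
    }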
|
||||
}
|
||||
|
|
|
@ -17,10 +17,10 @@ package org.tikv.common;
|
|||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Future;
|
||||
import org.tikv.common.meta.TiTimestamp;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
|
||||
/** Read-only PD client that exposes only read-related interfaces. Intended for TiDB-like use cases. */
|
||||
|
@ -38,9 +38,7 @@ public interface ReadOnlyPDClient {
|
|||
* @param key key in bytes for locating a region
|
||||
* @return the region whose startKey and endKey range covers the given key
|
||||
*/
|
||||
TiRegion getRegionByKey(BackOffer backOffer, ByteString key);
|
||||
|
||||
Future<TiRegion> getRegionByKeyAsync(BackOffer backOffer, ByteString key);
|
||||
Pair<Metapb.Region, Metapb.Peer> getRegionByKey(BackOffer backOffer, ByteString key);
|
||||
|
||||
/**
|
||||
* Get Region by Region Id
|
||||
|
@ -48,9 +46,7 @@ public interface ReadOnlyPDClient {
|
|||
* @param id Region Id
|
||||
* @return the region corresponding to the given Id
|
||||
*/
|
||||
TiRegion getRegionByID(BackOffer backOffer, long id);
|
||||
|
||||
Future<TiRegion> getRegionByIDAsync(BackOffer backOffer, long id);
|
||||
Pair<Metapb.Region, Metapb.Peer> getRegionByID(BackOffer backOffer, long id);
|
||||
|
||||
HostMapping getHostMapping();
|
||||
|
||||
|
@ -62,9 +58,7 @@ public interface ReadOnlyPDClient {
|
|||
*/
|
||||
Store getStore(BackOffer backOffer, long storeId);
|
||||
|
||||
Future<Store> getStoreAsync(BackOffer backOffer, long storeId);
|
||||
|
||||
List<Store> getAllStores(BackOffer backOffer);
|
||||
|
||||
boolean isReplicaRead();
|
||||
TiConfiguration.ReplicaRead getReplicaRead();
|
||||
}
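After this change the read-only interface hands back raw Metapb.Region/Peer pairs instead of TiRegion. A hedged usage sketch against the new signature (the backoff budget below is an arbitrary example):

    import com.google.protobuf.ByteString;
    import org.tikv.common.ReadOnlyPDClient;
    import org.tikv.common.util.BackOffer;
    import org.tikv.common.util.ConcreteBackOffer;
    import org.tikv.common.util.Pair;
    import org.tikv.kvproto.Metapb;

    class PdLookupSketch {
      // Locate the region and leader peer covering a key, retrying under a custom backoff budget.
      static Pair<Metapb.Region, Metapb.Peer> locate(ReadOnlyPDClient pd, byte[] rawKey) {
        BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(5000); // 5s budget, illustrative
        return pd.getRegionByKey(backOffer, ByteString.copyFrom(rawKey));
      }
    }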
|
||||
|
|
|
@ -17,6 +17,7 @@ package org.tikv.common;
|
|||
|
||||
import static org.tikv.common.ConfigUtils.*;
|
||||
|
||||
import io.grpc.Metadata;
|
||||
import java.io.Serializable;
|
||||
import java.net.URI;
|
||||
import java.util.*;
|
||||
|
@ -24,6 +25,7 @@ import java.util.concurrent.ConcurrentHashMap;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.pd.PDUtils;
|
||||
import org.tikv.common.replica.ReplicaSelector;
|
||||
import org.tikv.kvproto.Kvrpcpb.CommandPri;
|
||||
import org.tikv.kvproto.Kvrpcpb.IsolationLevel;
|
||||
|
||||
|
@ -31,6 +33,10 @@ public class TiConfiguration implements Serializable {
|
|||
|
||||
private static final Logger logger = LoggerFactory.getLogger(TiConfiguration.class);
|
||||
private static final ConcurrentHashMap<String, String> settings = new ConcurrentHashMap<>();
|
||||
public static final Metadata.Key FORWARD_META_DATA_KEY =
|
||||
Metadata.Key.of("tikv-forwarded-host", Metadata.ASCII_STRING_MARSHALLER);
|
||||
public static final Metadata.Key PD_FORWARD_META_DATA_KEY =
|
||||
Metadata.Key.of("pd-forwarded-host", Metadata.ASCII_STRING_MARSHALLER);
|
||||
|
||||
static {
|
||||
loadFromSystemProperties();
|
||||
|
@ -48,6 +54,7 @@ public class TiConfiguration implements Serializable {
|
|||
private static void loadFromDefaultProperties() {
|
||||
setIfMissing(TIKV_PD_ADDRESSES, DEF_PD_ADDRESSES);
|
||||
setIfMissing(TIKV_GRPC_TIMEOUT, DEF_TIMEOUT);
|
||||
setIfMissing(TIKV_GRPC_FORWARD_TIMEOUT, DEF_FORWARD_TIMEOUT);
|
||||
setIfMissing(TIKV_GRPC_SCAN_TIMEOUT, DEF_SCAN_TIMEOUT);
|
||||
setIfMissing(TIKV_GRPC_SCAN_BATCH_SIZE, DEF_SCAN_BATCH_SIZE);
|
||||
setIfMissing(TIKV_GRPC_MAX_FRAME_SIZE, DEF_MAX_FRAME_SIZE);
|
||||
|
@ -67,10 +74,14 @@ public class TiConfiguration implements Serializable {
|
|||
setIfMissing(TIKV_DB_PREFIX, DEF_DB_PREFIX);
|
||||
setIfMissing(TIKV_KV_CLIENT_CONCURRENCY, DEF_KV_CLIENT_CONCURRENCY);
|
||||
setIfMissing(TIKV_KV_MODE, TXN_KV_MODE);
|
||||
setIfMissing(TIKV_IS_REPLICA_READ, DEF_IS_REPLICA_READ);
|
||||
setIfMissing(TIKV_REPLICA_READ, DEF_REPLICA_READ);
|
||||
setIfMissing(TIKV_METRICS_ENABLE, DEF_METRICS_ENABLE);
|
||||
setIfMissing(TIKV_METRICS_PORT, DEF_METRICS_PORT);
|
||||
setIfMissing(TIKV_NETWORK_MAPPING_NAME, DEF_TIKV_NETWORK_MAPPING_NAME);
|
||||
setIfMissing(TIKV_ENABLE_GRPC_FORWARD, DEF_GRPC_FORWARD_ENABLE);
|
||||
setIfMissing(TIKV_GRPC_HEALTH_CHECK_TIMEOUT, DEF_CHECK_HEALTH_TIMEOUT);
|
||||
setIfMissing(TIKV_HEALTH_CHECK_PERIOD_DURATION, DEF_HEALTH_CHECK_PERIOD_DURATION);
|
||||
setIfMissing(TIKV_RAWKV_DEFAULT_BACKOFF_IN_MS, DEF_TIKV_RAWKV_DEFAULT_BACKOFF_IN_MS);
|
||||
}
|
||||
|
||||
public static void listAll() {
|
||||
|
@ -216,7 +227,19 @@ public class TiConfiguration implements Serializable {
|
|||
}
|
||||
}
|
||||
|
||||
private static ReplicaRead getReplicaRead(String key) {
|
||||
String value = get(key).toUpperCase(Locale.ROOT);
|
||||
if (FOLLOWER.equals(value)) {
|
||||
return ReplicaRead.FOLLOWER;
|
||||
} else if (LEADER_AND_FOLLOWER.equals(value)) {
|
||||
return ReplicaRead.LEADER_AND_FOLLOWER;
|
||||
} else {
|
||||
return ReplicaRead.LEADER;
|
||||
}
|
||||
}
|
||||
|
||||
private long timeout = getTimeAsMs(TIKV_GRPC_TIMEOUT);
|
||||
private long forwardTimeout = getTimeAsMs(TIKV_GRPC_FORWARD_TIMEOUT);
|
||||
private long scanTimeout = getTimeAsMs(TIKV_GRPC_SCAN_TIMEOUT);
|
||||
private int maxFrameSize = getInt(TIKV_GRPC_MAX_FRAME_SIZE);
|
||||
private List<URI> pdAddrs = getPdAddrs(TIKV_PD_ADDRESSES);
|
||||
|
@ -233,20 +256,34 @@ public class TiConfiguration implements Serializable {
|
|||
private boolean showRowId = getBoolean(TIKV_SHOW_ROWID);
|
||||
private String dbPrefix = get(TIKV_DB_PREFIX);
|
||||
private KVMode kvMode = getKvMode(TIKV_KV_MODE);
|
||||
private boolean enableGrpcForward = getBoolean(TIKV_ENABLE_GRPC_FORWARD);
|
||||
|
||||
private int kvClientConcurrency = getInt(TIKV_KV_CLIENT_CONCURRENCY);
|
||||
private boolean isReplicaRead = getBoolean(TIKV_IS_REPLICA_READ);
|
||||
private ReplicaRead replicaRead = getReplicaRead(TIKV_REPLICA_READ);
|
||||
private ReplicaSelector internalReplicaSelector = getReplicaSelector(replicaRead);
|
||||
private ReplicaSelector replicaSelector;
|
||||
|
||||
private boolean metricsEnable = getBoolean(TIKV_METRICS_ENABLE);
|
||||
private int metricsPort = getInt(TIKV_METRICS_PORT);
|
||||
private int grpcHealthCheckTimeout = getInt(TIKV_GRPC_HEALTH_CHECK_TIMEOUT);
|
||||
private int healthCheckPeriodDuration = getInt(TIKV_HEALTH_CHECK_PERIOD_DURATION);
|
||||
|
||||
private final String networkMappingName = get(TIKV_NETWORK_MAPPING_NAME);
|
||||
private HostMapping hostMapping = null;
|
||||
|
||||
private int rawKVDefaultBackoffInMS = getInt(TIKV_RAWKV_DEFAULT_BACKOFF_IN_MS);
|
||||
|
||||
public enum KVMode {
|
||||
TXN,
|
||||
RAW
|
||||
}
|
||||
|
||||
public enum ReplicaRead {
|
||||
LEADER,
|
||||
FOLLOWER,
|
||||
LEADER_AND_FOLLOWER
|
||||
}
|
||||
|
||||
public static TiConfiguration createDefault() {
|
||||
return new TiConfiguration();
|
||||
}
|
||||
|
@ -301,6 +338,15 @@ public class TiConfiguration implements Serializable {
|
|||
return this;
|
||||
}
|
||||
|
||||
public long getForwardTimeout() {
|
||||
return forwardTimeout;
|
||||
}
|
||||
|
||||
public TiConfiguration setForwardTimeout(long timeout) {
|
||||
this.forwardTimeout = timeout;
|
||||
return this;
|
||||
}
|
||||
|
||||
public long getScanTimeout() {
|
||||
return scanTimeout;
|
||||
}
|
||||
|
@ -457,15 +503,40 @@ public class TiConfiguration implements Serializable {
|
|||
return this;
|
||||
}
|
||||
|
||||
public boolean isReplicaRead() {
|
||||
return isReplicaRead;
|
||||
public ReplicaRead getReplicaRead() {
|
||||
return replicaRead;
|
||||
}
|
||||
|
||||
public TiConfiguration setReplicaRead(boolean isReplicaRead) {
|
||||
this.isReplicaRead = isReplicaRead;
|
||||
public TiConfiguration setReplicaRead(ReplicaRead replicaRead) {
|
||||
this.replicaRead = replicaRead;
|
||||
this.internalReplicaSelector = getReplicaSelector(this.replicaRead);
|
||||
return this;
|
||||
}
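With the boolean flag replaced by the ReplicaRead enum, switching reads to followers is a one-line configuration change. A minimal sketch using only the accessors added in this file:

    import org.tikv.common.TiConfiguration;

    class ReplicaReadConfigSketch {
      static TiConfiguration followerReadConf() {
        // createDefault() starts from system properties / built-in defaults; the setter
        // also swaps in the matching internal ReplicaSelector (see getReplicaSelector below).
        TiConfiguration conf = TiConfiguration.createDefault();
        conf.setReplicaRead(TiConfiguration.ReplicaRead.FOLLOWER);
        return conf;
      }
    }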
|
||||
|
||||
private ReplicaSelector getReplicaSelector(ReplicaRead replicaRead) {
|
||||
if (TiConfiguration.ReplicaRead.LEADER.equals(replicaRead)) {
|
||||
return ReplicaSelector.LEADER;
|
||||
} else if (TiConfiguration.ReplicaRead.FOLLOWER.equals(replicaRead)) {
|
||||
return ReplicaSelector.FOLLOWER;
|
||||
} else if (TiConfiguration.ReplicaRead.LEADER_AND_FOLLOWER.equals(replicaRead)) {
|
||||
return ReplicaSelector.LEADER_AND_FOLLOWER;
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public ReplicaSelector getReplicaSelector() {
|
||||
if (replicaSelector != null) {
|
||||
return replicaSelector;
|
||||
} else {
|
||||
return internalReplicaSelector;
|
||||
}
|
||||
}
|
||||
|
||||
public void setReplicaSelector(ReplicaSelector replicaSelector) {
|
||||
this.replicaSelector = replicaSelector;
|
||||
}
|
||||
|
||||
public boolean isMetricsEnable() {
|
||||
return metricsEnable;
|
||||
}
|
||||
|
@ -487,4 +558,32 @@ public class TiConfiguration implements Serializable {
|
|||
public String getNetworkMappingName() {
|
||||
return this.networkMappingName;
|
||||
}
|
||||
|
||||
public HostMapping getHostMapping() {
|
||||
return hostMapping;
|
||||
}
|
||||
|
||||
public void setHostMapping(HostMapping mapping) {
|
||||
this.hostMapping = mapping;
|
||||
}
|
||||
|
||||
public boolean getEnableGrpcForward() {
|
||||
return this.enableGrpcForward;
|
||||
}
|
||||
|
||||
public long getGrpcHealthCheckTimeout() {
|
||||
return this.grpcHealthCheckTimeout;
|
||||
}
|
||||
|
||||
public long getHealthCheckPeriodDuration() {
|
||||
return this.healthCheckPeriodDuration;
|
||||
}
|
||||
|
||||
public int getRawKVDefaultBackoffInMS() {
|
||||
return rawKVDefaultBackoffInMS;
|
||||
}
|
||||
|
||||
public void setRawKVDefaultBackoffInMS(int rawKVDefaultBackoffInMS) {
|
||||
this.rawKVDefaultBackoffInMS = rawKVDefaultBackoffInMS;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,29 +20,24 @@ import static org.tikv.common.util.ClientUtils.groupKeysByRegion;
|
|||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import com.google.protobuf.ByteString;
|
||||
import io.prometheus.client.CollectorRegistry;
|
||||
import io.prometheus.client.exporter.HTTPServer;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.catalog.Catalog;
|
||||
import org.tikv.common.event.CacheInvalidateEvent;
|
||||
import org.tikv.common.exception.TiKVException;
|
||||
import org.tikv.common.key.Key;
|
||||
import org.tikv.common.meta.TiTimestamp;
|
||||
import org.tikv.common.policy.RetryPolicy;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.RegionStoreClient.RegionStoreClientBuilder;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.*;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.raw.RawKVClient;
|
||||
|
@ -59,7 +54,6 @@ public class TiSession implements AutoCloseable {
|
|||
private static final Map<String, TiSession> sessionCachedMap = new HashMap<>();
|
||||
private final TiConfiguration conf;
|
||||
private final ChannelFactory channelFactory;
|
||||
private Function<CacheInvalidateEvent, Void> cacheInvalidateCallback;
|
||||
// below object creation is either heavy or making connection (pd), pending for lazy loading
|
||||
private volatile PDClient client;
|
||||
private volatile Catalog catalog;
|
||||
|
@ -71,33 +65,19 @@ public class TiSession implements AutoCloseable {
|
|||
private volatile ExecutorService batchScanThreadPool;
|
||||
private volatile ExecutorService deleteRangeThreadPool;
|
||||
private volatile RegionManager regionManager;
|
||||
private volatile boolean enableGrpcForward;
|
||||
private volatile RegionStoreClient.RegionStoreClientBuilder clientBuilder;
|
||||
private boolean isClosed = false;
|
||||
private HTTPServer server;
|
||||
private CollectorRegistry collectorRegistry;
|
||||
private volatile boolean isClosed = false;
|
||||
private MetricsServer metricsServer;
|
||||
|
||||
public TiSession(TiConfiguration conf) {
|
||||
this.conf = conf;
|
||||
this.channelFactory = new ChannelFactory(conf.getMaxFrameSize());
|
||||
this.client = PDClient.createRaw(conf, channelFactory);
|
||||
if (conf.isMetricsEnable()) {
|
||||
try {
|
||||
this.collectorRegistry = new CollectorRegistry();
|
||||
this.collectorRegistry.register(RawKVClient.RAW_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(RawKVClient.RAW_REQUEST_FAILURE);
|
||||
this.collectorRegistry.register(RawKVClient.RAW_REQUEST_SUCCESS);
|
||||
this.collectorRegistry.register(RegionStoreClient.GRPC_RAW_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(RetryPolicy.GRPC_SINGLE_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(RegionManager.GET_REGION_BY_KEY_REQUEST_LATENCY);
|
||||
this.collectorRegistry.register(PDClient.PD_GET_REGION_BY_KEY_REQUEST_LATENCY);
|
||||
this.server =
|
||||
new HTTPServer(
|
||||
new InetSocketAddress(conf.getMetricsPort()), this.collectorRegistry, true);
|
||||
logger.info("http server is up " + this.server.getPort());
|
||||
} catch (Exception e) {
|
||||
logger.error("http server not up");
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
this.enableGrpcForward = conf.getEnableGrpcForward();
|
||||
this.metricsServer = MetricsServer.getInstance(conf);
|
||||
if (this.enableGrpcForward) {
|
||||
logger.info("enable grpc forward for high available");
|
||||
}
|
||||
logger.info("TiSession initialized in " + conf.getKvMode() + " mode");
|
||||
}
|
||||
|
@ -122,22 +102,30 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public RawKVClient createRawClient() {
|
||||
checkIsClosed();
|
||||
|
||||
RegionStoreClientBuilder builder =
|
||||
new RegionStoreClientBuilder(conf, channelFactory, this.getRegionManager(), client);
|
||||
return new RawKVClient(this, builder);
|
||||
}
|
||||
|
||||
public KVClient createKVClient() {
|
||||
checkIsClosed();
|
||||
|
||||
RegionStoreClientBuilder builder =
|
||||
new RegionStoreClientBuilder(conf, channelFactory, this.getRegionManager(), client);
|
||||
return new KVClient(conf, builder);
|
||||
}
|
||||
|
||||
public TxnKVClient createTxnClient() {
|
||||
checkIsClosed();
|
||||
|
||||
return new TxnKVClient(conf, this.getRegionStoreClientBuilder(), this.getPDClient());
|
||||
}
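All of the client factories now share the checkIsClosed guard, so a session must be used before close() and discarded afterwards. A rough end-to-end sketch, limited to calls visible in this diff (the RawKVClient read/write API itself is not part of it):

    import org.tikv.common.TiConfiguration;
    import org.tikv.common.TiSession;
    import org.tikv.raw.RawKVClient;

    class SessionUsageSketch {
      static void rawClientLifecycle() throws Exception {
        TiConfiguration conf = TiConfiguration.createDefault();
        TiSession session = new TiSession(conf);
        RawKVClient raw = session.createRawClient();
        // ... issue raw KV operations here ...
        // close() removes the session from the cache, stops the metrics server,
        // shuts down the thread pools and closes the PD client and region manager.
        session.close();
      }
    }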
|
||||
|
||||
public RegionStoreClient.RegionStoreClientBuilder getRegionStoreClientBuilder() {
|
||||
checkIsClosed();
|
||||
|
||||
RegionStoreClient.RegionStoreClientBuilder res = clientBuilder;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -157,18 +145,26 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public TiTimestamp getTimestamp() {
|
||||
checkIsClosed();
|
||||
|
||||
return getPDClient().getTimestamp(ConcreteBackOffer.newTsoBackOff());
|
||||
}
|
||||
|
||||
public Snapshot createSnapshot() {
|
||||
checkIsClosed();
|
||||
|
||||
return new Snapshot(getTimestamp(), this);
|
||||
}
|
||||
|
||||
public Snapshot createSnapshot(TiTimestamp ts) {
|
||||
checkIsClosed();
|
||||
|
||||
return new Snapshot(ts, this);
|
||||
}
|
||||
|
||||
public PDClient getPDClient() {
|
||||
checkIsClosed();
|
||||
|
||||
PDClient res = client;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -182,6 +178,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public Catalog getCatalog() {
|
||||
checkIsClosed();
|
||||
|
||||
Catalog res = catalog;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -194,12 +192,14 @@ public class TiSession implements AutoCloseable {
|
|||
return res;
|
||||
}
|
||||
|
||||
public synchronized RegionManager getRegionManager() {
|
||||
public RegionManager getRegionManager() {
|
||||
checkIsClosed();
|
||||
|
||||
RegionManager res = regionManager;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
if (regionManager == null) {
|
||||
regionManager = new RegionManager(getPDClient(), this.cacheInvalidateCallback);
|
||||
regionManager = new RegionManager(getConf(), getPDClient(), this.channelFactory);
|
||||
}
|
||||
res = regionManager;
|
||||
}
|
||||
|
@ -208,6 +208,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForIndexScan() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = indexScanThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -227,6 +229,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForTableScan() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = tableScanThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -243,6 +247,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForBatchPut() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = batchPutThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -262,6 +268,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForBatchGet() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = batchGetThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -281,6 +289,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForBatchDelete() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = batchDeleteThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -300,6 +310,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForBatchScan() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = batchScanThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -319,6 +331,8 @@ public class TiSession implements AutoCloseable {
|
|||
}
|
||||
|
||||
public ExecutorService getThreadPoolForDeleteRange() {
|
||||
checkIsClosed();
|
||||
|
||||
ExecutorService res = deleteRangeThreadPool;
|
||||
if (res == null) {
|
||||
synchronized (this) {
|
||||
|
@ -339,22 +353,11 @@ public class TiSession implements AutoCloseable {
|
|||
|
||||
@VisibleForTesting
|
||||
public ChannelFactory getChannelFactory() {
|
||||
checkIsClosed();
|
||||
|
||||
return channelFactory;
|
||||
}
|
||||
|
||||
public CollectorRegistry getCollectorRegistry() {
|
||||
return collectorRegistry;
|
||||
}
|
||||
|
||||
/**
|
||||
* This is used for setting a callback function to invalidate cache information
|
||||
*
|
||||
* @param callBackFunc callback function
|
||||
*/
|
||||
public void injectCallBackFunc(Function<CacheInvalidateEvent, Void> callBackFunc) {
|
||||
this.cacheInvalidateCallback = callBackFunc;
|
||||
}
|
||||
|
||||
/**
|
||||
* split region and scatter
|
||||
*
|
||||
|
@ -365,11 +368,13 @@ public class TiSession implements AutoCloseable {
|
|||
int splitRegionBackoffMS,
|
||||
int scatterRegionBackoffMS,
|
||||
int scatterWaitMS) {
|
||||
checkIsClosed();
|
||||
|
||||
logger.info(String.format("split key's size is %d", splitKeys.size()));
|
||||
long startMS = System.currentTimeMillis();
|
||||
|
||||
// split region
|
||||
List<TiRegion> newRegions =
|
||||
List<Metapb.Region> newRegions =
|
||||
splitRegion(
|
||||
splitKeys
|
||||
.stream()
|
||||
|
@ -378,7 +383,7 @@ public class TiSession implements AutoCloseable {
|
|||
ConcreteBackOffer.newCustomBackOff(splitRegionBackoffMS));
|
||||
|
||||
// scatter region
|
||||
for (TiRegion newRegion : newRegions) {
|
||||
for (Metapb.Region newRegion : newRegions) {
|
||||
try {
|
||||
getPDClient()
|
||||
.scatterRegion(newRegion, ConcreteBackOffer.newCustomBackOff(scatterRegionBackoffMS));
|
||||
|
@ -391,7 +396,7 @@ public class TiSession implements AutoCloseable {
|
|||
if (scatterWaitMS > 0) {
|
||||
logger.info("start to wait scatter region finish");
|
||||
long scatterRegionStartMS = System.currentTimeMillis();
|
||||
for (TiRegion newRegion : newRegions) {
|
||||
for (Metapb.Region newRegion : newRegions) {
|
||||
long remainMS = (scatterRegionStartMS + scatterWaitMS) - System.currentTimeMillis();
|
||||
if (remainMS <= 0) {
|
||||
logger.warn("wait scatter region timeout");
|
||||
|
@ -408,17 +413,17 @@ public class TiSession implements AutoCloseable {
|
|||
logger.info("splitRegionAndScatter cost {} seconds", (endMS - startMS) / 1000);
|
||||
}
|
||||
|
||||
private List<TiRegion> splitRegion(List<ByteString> splitKeys, BackOffer backOffer) {
|
||||
List<TiRegion> regions = new ArrayList<>();
|
||||
private List<Metapb.Region> splitRegion(List<ByteString> splitKeys, BackOffer backOffer) {
|
||||
List<Metapb.Region> regions = new ArrayList<>();
|
||||
|
||||
Map<TiRegion, List<ByteString>> groupKeys =
|
||||
groupKeysByRegion(regionManager, splitKeys, backOffer);
|
||||
for (Map.Entry<TiRegion, List<ByteString>> entry : groupKeys.entrySet()) {
|
||||
|
||||
Pair<TiRegion, Metapb.Store> pair =
|
||||
Pair<TiRegion, TiStore> pair =
|
||||
getRegionManager().getRegionStorePairByKey(entry.getKey().getStartKey());
|
||||
TiRegion region = pair.first;
|
||||
Metapb.Store store = pair.second;
|
||||
TiStore store = pair.second;
|
||||
List<ByteString> splits =
|
||||
entry
|
||||
.getValue()
|
||||
|
@ -431,7 +436,7 @@ public class TiSession implements AutoCloseable {
|
|||
"split key equal to region start key or end key. Region splitting is not needed.");
|
||||
} else {
|
||||
logger.info("start to split region id={}, split size={}", region.getId(), splits.size());
|
||||
List<TiRegion> newRegions;
|
||||
List<Metapb.Region> newRegions;
|
||||
try {
|
||||
newRegions = getRegionStoreClientBuilder().build(region, store).splitRegion(splits);
|
||||
} catch (final TiKVException e) {
|
||||
|
@ -450,49 +455,111 @@ public class TiSession implements AutoCloseable {
|
|||
return regions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void close() throws Exception {
|
||||
private void checkIsClosed() {
|
||||
if (isClosed) {
|
||||
logger.warn("this TiSession is already closed!");
|
||||
return;
|
||||
}
|
||||
|
||||
if (server != null) {
|
||||
server.stop();
|
||||
logger.info("Metrics server on " + server.getPort() + " is stopped");
|
||||
}
|
||||
|
||||
isClosed = true;
|
||||
synchronized (sessionCachedMap) {
|
||||
sessionCachedMap.remove(conf.getPdAddrsString());
|
||||
}
|
||||
|
||||
if (tableScanThreadPool != null) {
|
||||
tableScanThreadPool.shutdownNow();
|
||||
}
|
||||
if (indexScanThreadPool != null) {
|
||||
indexScanThreadPool.shutdownNow();
|
||||
}
|
||||
if (batchGetThreadPool != null) {
|
||||
batchGetThreadPool.shutdownNow();
|
||||
}
|
||||
if (batchPutThreadPool != null) {
|
||||
batchPutThreadPool.shutdownNow();
|
||||
}
|
||||
if (batchDeleteThreadPool != null) {
|
||||
batchDeleteThreadPool.shutdownNow();
|
||||
}
|
||||
if (batchScanThreadPool != null) {
|
||||
batchScanThreadPool.shutdownNow();
|
||||
}
|
||||
if (deleteRangeThreadPool != null) {
|
||||
deleteRangeThreadPool.shutdownNow();
|
||||
}
|
||||
if (client != null) {
|
||||
getPDClient().close();
|
||||
}
|
||||
if (catalog != null) {
|
||||
getCatalog().close();
|
||||
throw new RuntimeException("this TiSession is closed!");
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void closeAwaitTermination(long timeoutMS) throws Exception {
|
||||
shutdown(false);
|
||||
|
||||
long startMS = System.currentTimeMillis();
|
||||
while (true) {
|
||||
if (isTerminatedExecutorServices()) {
|
||||
cleanAfterTerminated();
|
||||
return;
|
||||
}
|
||||
|
||||
if (System.currentTimeMillis() - startMS > timeoutMS) {
|
||||
shutdown(true);
|
||||
return;
|
||||
}
|
||||
Thread.sleep(500);
|
||||
}
|
||||
}
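closeAwaitTermination polls isTerminatedExecutorServices and falls back to a forced shutdown after the deadline. The same graceful-then-forced sequence can also be written with the JDK's awaitTermination; a sketch with illustrative names:

    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.TimeUnit;

    class ShutdownSketch {
      static void drain(List<ExecutorService> pools, long timeoutMS) throws InterruptedException {
        for (ExecutorService pool : pools) {
          pool.shutdown(); // stop accepting new tasks, let queued ones finish
        }
        long deadline = System.currentTimeMillis() + timeoutMS;
        for (ExecutorService pool : pools) {
          long remain = Math.max(1, deadline - System.currentTimeMillis());
          if (!pool.awaitTermination(remain, TimeUnit.MILLISECONDS)) {
            pool.shutdownNow(); // interrupt whatever is still running
          }
        }
      }
    }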
|
||||
|
||||
@Override
|
||||
public synchronized void close() throws Exception {
|
||||
shutdown(true);
|
||||
}
|
||||
|
||||
private synchronized void shutdown(boolean now) throws Exception {
|
||||
if (!isClosed) {
|
||||
isClosed = true;
|
||||
synchronized (sessionCachedMap) {
|
||||
sessionCachedMap.remove(conf.getPdAddrsString());
|
||||
}
|
||||
|
||||
if (metricsServer != null) {
|
||||
metricsServer.close();
|
||||
}
|
||||
}
|
||||
|
||||
if (now) {
|
||||
shutdownNowExecutorServices();
|
||||
cleanAfterTerminated();
|
||||
} else {
|
||||
shutdownExecutorServices();
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void cleanAfterTerminated() throws InterruptedException {
|
||||
if (regionManager != null) {
|
||||
regionManager.close();
|
||||
}
|
||||
if (client != null) {
|
||||
client.close();
|
||||
}
|
||||
if (catalog != null) {
|
||||
catalog.close();
|
||||
}
|
||||
}
|
||||
|
||||
private List<ExecutorService> getExecutorServices() {
|
||||
List<ExecutorService> executorServiceList = new ArrayList<>();
|
||||
if (tableScanThreadPool != null) {
|
||||
executorServiceList.add(tableScanThreadPool);
|
||||
}
|
||||
if (indexScanThreadPool != null) {
|
||||
executorServiceList.add(indexScanThreadPool);
|
||||
}
|
||||
if (batchGetThreadPool != null) {
|
||||
executorServiceList.add(batchGetThreadPool);
|
||||
}
|
||||
if (batchPutThreadPool != null) {
|
||||
executorServiceList.add(batchPutThreadPool);
|
||||
}
|
||||
if (batchDeleteThreadPool != null) {
|
||||
executorServiceList.add(batchDeleteThreadPool);
|
||||
}
|
||||
if (batchScanThreadPool != null) {
|
||||
executorServiceList.add(batchScanThreadPool);
|
||||
}
|
||||
if (deleteRangeThreadPool != null) {
|
||||
executorServiceList.add(deleteRangeThreadPool);
|
||||
}
|
||||
return executorServiceList;
|
||||
}
|
||||
|
||||
private void shutdownExecutorServices() {
|
||||
for (ExecutorService executorService : getExecutorServices()) {
|
||||
executorService.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
private void shutdownNowExecutorServices() {
|
||||
for (ExecutorService executorService : getExecutorServices()) {
|
||||
executorService.shutdownNow();
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isTerminatedExecutorServices() {
|
||||
for (ExecutorService executorService : getExecutorServices()) {
|
||||
if (!executorService.isTerminated()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,258 +0,0 @@
|
|||
/*
|
||||
* Copyright 2019 PingCAP, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.tikv.common.allocator;
|
||||
|
||||
import com.google.common.primitives.UnsignedLongs;
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.io.Serializable;
|
||||
import java.util.Arrays;
|
||||
import java.util.function.Function;
|
||||
import org.tikv.common.Snapshot;
|
||||
import org.tikv.common.TiSession;
|
||||
import org.tikv.common.codec.CodecDataInput;
|
||||
import org.tikv.common.codec.CodecDataOutput;
|
||||
import org.tikv.common.codec.MetaCodec;
|
||||
import org.tikv.common.exception.AllocateRowIDOverflowException;
|
||||
import org.tikv.common.exception.TiBatchWriteException;
|
||||
import org.tikv.common.meta.TiTableInfo;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.txn.TwoPhaseCommitter;
|
||||
|
||||
/**
|
||||
* RowIDAllocator reads the current start from TiKV and writes 'start + step' back to TiKV. It is designed
|
||||
* to allocate all ids for the data to be written at once, hence it does not need to run inside a txn.
|
||||
*
|
||||
* <p>(start, end] is allocated
|
||||
*/
|
||||
public final class RowIDAllocator implements Serializable {
|
||||
private final long maxShardRowIDBits;
|
||||
private final long dbId;
|
||||
private final TiSession session;
|
||||
private final long step;
|
||||
private long end;
|
||||
|
||||
private RowIDAllocator(long maxShardRowIDBits, long dbId, long step, TiSession session) {
|
||||
this.maxShardRowIDBits = maxShardRowIDBits;
|
||||
this.dbId = dbId;
|
||||
this.step = step;
|
||||
this.session = session;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param index should be >= 1
|
||||
* @return the shard row id, i.e. the allocated row id with the shard bits filled in
|
||||
*/
|
||||
public long getShardRowId(long index) {
|
||||
return getShardRowId(maxShardRowIDBits, index, index + getStart());
|
||||
}
|
||||
|
||||
static long getShardRowId(long maxShardRowIDBits, long partitionIndex, long rowID) {
|
||||
if (maxShardRowIDBits <= 0 || maxShardRowIDBits >= 16) {
|
||||
return rowID;
|
||||
}
|
||||
|
||||
// assert rowID < Math.pow(2, 64 - maxShardRowIDBits)
|
||||
|
||||
long partition = partitionIndex & ((1L << maxShardRowIDBits) - 1);
|
||||
return rowID | (partition << (64 - maxShardRowIDBits - 1));
|
||||
}
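A worked example of the bit layout getShardRowId produces, assuming maxShardRowIDBits = 4: the shard bits sit just below the sign bit, leaving the low 59 bits for the allocated row id.

    class ShardRowIdSketch {
      public static void main(String[] args) {
        long maxShardRowIDBits = 4;
        long partitionIndex = 5;
        long rowID = 100;
        long partition = partitionIndex & ((1L << maxShardRowIDBits) - 1); // 5, i.e. 0b0101
        long shardRowId = rowID | (partition << (64 - maxShardRowIDBits - 1));
        // 0b0101 lands in bits 59..62, so this prints 2882303761517117540 (= 2^61 + 2^59 + 100)
        System.out.println(shardRowId);
      }
    }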
|
||||
|
||||
public static RowIDAllocator create(
|
||||
long dbId, TiTableInfo table, TiSession session, boolean unsigned, long step) {
|
||||
RowIDAllocator allocator =
|
||||
new RowIDAllocator(table.getMaxShardRowIDBits(), dbId, step, session);
|
||||
if (unsigned) {
|
||||
allocator.initUnsigned(session.createSnapshot(), table.getId(), table.getMaxShardRowIDBits());
|
||||
} else {
|
||||
allocator.initSigned(session.createSnapshot(), table.getId(), table.getMaxShardRowIDBits());
|
||||
}
|
||||
|
||||
return allocator;
|
||||
}
|
||||
|
||||
public long getStart() {
|
||||
return end - step;
|
||||
}
|
||||
|
||||
public long getEnd() {
|
||||
return end;
|
||||
}
|
||||
|
||||
// set key value pair to tikv via the two-phase commit protocol.
|
||||
private void set(ByteString key, byte[] value) {
|
||||
TwoPhaseCommitter twoPhaseCommitter =
|
||||
new TwoPhaseCommitter(session, session.getTimestamp().getVersion());
|
||||
|
||||
twoPhaseCommitter.prewritePrimaryKey(
|
||||
ConcreteBackOffer.newCustomBackOff(BackOffer.PREWRITE_MAX_BACKOFF),
|
||||
key.toByteArray(),
|
||||
value);
|
||||
|
||||
twoPhaseCommitter.commitPrimaryKey(
|
||||
ConcreteBackOffer.newCustomBackOff(BackOffer.BATCH_COMMIT_BACKOFF),
|
||||
key.toByteArray(),
|
||||
session.getTimestamp().getVersion());
|
||||
|
||||
try {
|
||||
twoPhaseCommitter.close();
|
||||
} catch (Throwable ignored) {
|
||||
}
|
||||
}
|
||||
|
||||
private void updateMeta(ByteString key, byte[] oldVal, Snapshot snapshot) {
|
||||
// 1. encode hash meta key
|
||||
// 2. load meta via hash meta key from TiKV
|
||||
// 3. update meta's field count and set it back to TiKV
|
||||
CodecDataOutput cdo = new CodecDataOutput();
|
||||
ByteString metaKey = MetaCodec.encodeHashMetaKey(cdo, key.toByteArray());
|
||||
long fieldCount;
|
||||
ByteString metaVal = snapshot.get(metaKey);
|
||||
|
||||
// decode long from bytes
|
||||
// big endian the 8 bytes
|
||||
fieldCount = new CodecDataInput(metaVal.toByteArray()).readLong();
|
||||
|
||||
// update meta field count only if oldVal is null or empty
|
||||
if (oldVal == null || oldVal.length == 0) {
|
||||
fieldCount++;
|
||||
cdo.reset();
|
||||
cdo.writeLong(fieldCount);
|
||||
|
||||
set(metaKey, cdo.toBytes());
|
||||
}
|
||||
}
|
||||
|
||||
private long updateHash(
|
||||
ByteString key,
|
||||
ByteString field,
|
||||
Function<byte[], byte[]> calculateNewVal,
|
||||
Snapshot snapshot) {
|
||||
// 1. encode hash data key
|
||||
// 2. get value in byte from get operation
|
||||
// 3. calculate new value via calculateNewVal
|
||||
// 4. check old value equals to new value or not
|
||||
// 5. set the new value back to TiKV via 2pc
|
||||
// 6. encode a hash meta key
|
||||
// 7. update a hash meta field count if needed
|
||||
|
||||
CodecDataOutput cdo = new CodecDataOutput();
|
||||
MetaCodec.encodeHashDataKey(cdo, key.toByteArray(), field.toByteArray());
|
||||
ByteString dataKey = cdo.toByteString();
|
||||
byte[] oldVal = snapshot.get(dataKey.toByteArray());
|
||||
|
||||
byte[] newVal = calculateNewVal.apply(oldVal);
|
||||
if (Arrays.equals(newVal, oldVal)) {
|
||||
// no need to update
|
||||
return 0L;
|
||||
}
|
||||
|
||||
set(dataKey, newVal);
|
||||
updateMeta(key, oldVal, snapshot);
|
||||
return Long.parseLong(new String(newVal));
|
||||
}
|
||||
|
||||
private static boolean isDBExisted(long dbId, Snapshot snapshot) {
|
||||
ByteString dbKey = MetaCodec.encodeDatabaseID(dbId);
|
||||
ByteString json = MetaCodec.hashGet(MetaCodec.KEY_DBs, dbKey, snapshot);
|
||||
return json != null && !json.isEmpty();
|
||||
}
|
||||
|
||||
private static boolean isTableExisted(long dbId, long tableId, Snapshot snapshot) {
|
||||
ByteString dbKey = MetaCodec.encodeDatabaseID(dbId);
|
||||
ByteString tableKey = MetaCodec.tableKey(tableId);
|
||||
return !MetaCodec.hashGet(dbKey, tableKey, snapshot).isEmpty();
|
||||
}
|
||||
|
||||
public static boolean shardRowBitsOverflow(
|
||||
long base, long step, long shardRowBits, boolean reservedSignBit) {
|
||||
long signBit = reservedSignBit ? 1 : 0;
|
||||
long mask = ((1L << shardRowBits) - 1) << (64 - shardRowBits - signBit);
|
||||
if (reservedSignBit) {
|
||||
return ((base + step) & mask) > 0;
|
||||
} else {
|
||||
return Long.compareUnsigned((base + step) & mask, 0) > 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* read current row id from TiKV and write the calculated value back to TiKV. The calculation rule
|
||||
* is start(read from TiKV) + step.
|
||||
*/
|
||||
public long udpateAllocateId(
|
||||
long dbId, long tableId, long step, Snapshot snapshot, long shard, boolean hasSignedBit) {
|
||||
if (isDBExisted(dbId, snapshot) && isTableExisted(dbId, tableId, snapshot)) {
|
||||
return updateHash(
|
||||
MetaCodec.encodeDatabaseID(dbId),
|
||||
MetaCodec.autoTableIDKey(tableId),
|
||||
(oldVal) -> {
|
||||
long base = 0;
|
||||
if (oldVal != null && oldVal.length != 0) {
|
||||
base = Long.parseLong(new String(oldVal));
|
||||
}
|
||||
if (shard >= 1 && shardRowBitsOverflow(base, step, shard, hasSignedBit)) {
|
||||
throw new AllocateRowIDOverflowException(base, step, shard);
|
||||
}
|
||||
base += step;
|
||||
return String.valueOf(base).getBytes();
|
||||
},
|
||||
snapshot);
|
||||
}
|
||||
|
||||
throw new IllegalArgumentException("table or database is not existed");
|
||||
}
|
||||
|
||||
/** read current row id from TiKV according to database id and table id. */
|
||||
public static long getAllocateId(long dbId, long tableId, Snapshot snapshot) {
|
||||
if (isDBExisted(dbId, snapshot) && isTableExisted(dbId, tableId, snapshot)) {
|
||||
ByteString dbKey = MetaCodec.encodeDatabaseID(dbId);
|
||||
ByteString tblKey = MetaCodec.autoTableIDKey(tableId);
|
||||
ByteString val = MetaCodec.hashGet(dbKey, tblKey, snapshot);
|
||||
if (val.isEmpty()) return 0L;
|
||||
return Long.parseLong(val.toStringUtf8());
|
||||
}
|
||||
|
||||
throw new IllegalArgumentException("table or database is not existed");
|
||||
}
|
||||
|
||||
private void initSigned(Snapshot snapshot, long tableId, long shard) {
|
||||
// get new start from TiKV, and calculate new end and set it back to TiKV.
|
||||
long newStart = getAllocateId(dbId, tableId, snapshot);
|
||||
long tmpStep = Math.min(Long.MAX_VALUE - newStart, step);
|
||||
if (tmpStep != step) {
|
||||
throw new TiBatchWriteException("cannot allocate ids for this write");
|
||||
}
|
||||
if (newStart == Long.MAX_VALUE) {
|
||||
throw new TiBatchWriteException("cannot allocate more ids since it ");
|
||||
}
|
||||
end = udpateAllocateId(dbId, tableId, tmpStep, snapshot, shard, true);
|
||||
}
|
||||
|
||||
private void initUnsigned(Snapshot snapshot, long tableId, long shard) {
|
||||
// get new start from TiKV, and calculate new end and set it back to TiKV.
|
||||
long newStart = getAllocateId(dbId, tableId, snapshot);
|
||||
// for unsigned long, -1L is max value.
|
||||
long tmpStep = UnsignedLongs.min(-1L - newStart, step);
|
||||
if (tmpStep != step) {
|
||||
throw new TiBatchWriteException("cannot allocate ids for this write");
|
||||
}
|
||||
// when longs are compared as unsigned, -1L is the largest value.
|
||||
if (UnsignedLongs.compare(newStart, -1L) == 0) {
|
||||
throw new TiBatchWriteException(
|
||||
"cannot allocate more ids since the start reaches " + "unsigned long's max value ");
|
||||
}
|
||||
end = udpateAllocateId(dbId, tableId, tmpStep, snapshot, shard, false);
|
||||
}
|
||||
}
|
|
@ -24,7 +24,7 @@ public class RegionException extends TiKVException {
|
|||
private final Error regionErr;
|
||||
|
||||
public RegionException(Error regionErr) {
|
||||
super("Region Exception occurred" + regionErr.getMessage());
|
||||
super("Region Exception occurred " + regionErr.getMessage());
|
||||
this.regionErr = regionErr;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,20 +19,14 @@ package org.tikv.common.operation;
|
|||
|
||||
import static org.tikv.common.util.BackOffFunction.BackOffFuncType.BoTxnLockFast;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import io.grpc.Status;
|
||||
import io.grpc.StatusRuntimeException;
|
||||
import java.util.Collections;
|
||||
import java.util.function.Function;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.codec.KeyUtils;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
import org.tikv.common.exception.KeyException;
|
||||
import org.tikv.common.region.RegionErrorReceiver;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.BackOffFunction;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.kvproto.Errorpb;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
|
@ -43,16 +37,12 @@ import org.tikv.txn.ResolveLockResult;
|
|||
// TODO: consider refactor to Builder mode
|
||||
public class KVErrorHandler<RespT> implements ErrorHandler<RespT> {
|
||||
private static final Logger logger = LoggerFactory.getLogger(KVErrorHandler.class);
|
||||
// if a store does not have leader currently, store id is set to 0
|
||||
private static final int NO_LEADER_STORE_ID = 0;
|
||||
private final Function<RespT, Errorpb.Error> getRegionError;
|
||||
private final Function<RespT, Kvrpcpb.KeyError> getKeyError;
|
||||
private final Function<ResolveLockResult, Object> resolveLockResultCallback;
|
||||
private final RegionManager regionManager;
|
||||
private final RegionErrorReceiver recv;
|
||||
private final AbstractLockResolverClient lockResolverClient;
|
||||
private final long callerStartTS;
|
||||
private final boolean forWrite;
|
||||
private final RegionErrorHandler<RespT> regionHandler;
|
||||
|
||||
public KVErrorHandler(
|
||||
RegionManager regionManager,
|
||||
|
@ -63,42 +53,14 @@ public class KVErrorHandler<RespT> implements ErrorHandler<RespT> {
|
|||
Function<ResolveLockResult, Object> resolveLockResultCallback,
|
||||
long callerStartTS,
|
||||
boolean forWrite) {
|
||||
this.recv = recv;
|
||||
this.regionHandler = new RegionErrorHandler<>(regionManager, recv, getRegionError);
|
||||
this.lockResolverClient = lockResolverClient;
|
||||
this.regionManager = regionManager;
|
||||
this.getRegionError = getRegionError;
|
||||
this.getKeyError = getKeyError;
|
||||
this.resolveLockResultCallback = resolveLockResultCallback;
|
||||
this.callerStartTS = callerStartTS;
|
||||
this.forWrite = forWrite;
|
||||
}
|
||||
|
||||
public KVErrorHandler(
|
||||
RegionManager regionManager,
|
||||
RegionErrorReceiver recv,
|
||||
Function<RespT, Errorpb.Error> getRegionError) {
|
||||
this.recv = recv;
|
||||
this.lockResolverClient = null;
|
||||
this.regionManager = regionManager;
|
||||
this.getRegionError = getRegionError;
|
||||
this.getKeyError = resp -> null;
|
||||
this.resolveLockResultCallback = resolveLock -> null;
|
||||
this.callerStartTS = 0;
|
||||
this.forWrite = false;
|
||||
}
|
||||
|
||||
private Errorpb.Error getRegionError(RespT resp) {
|
||||
if (getRegionError != null) {
|
||||
return getRegionError.apply(resp);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void invalidateRegionStoreCache(TiRegion ctxRegion) {
|
||||
regionManager.invalidateRegion(ctxRegion);
|
||||
regionManager.invalidateStore(ctxRegion.getLeader().getStoreId());
|
||||
}
|
||||
|
||||
private void resolveLock(BackOffer backOffer, Lock lock) {
|
||||
if (lockResolverClient != null) {
|
||||
logger.warn("resolving lock");
|
||||
|
@ -124,160 +86,34 @@ public class KVErrorHandler<RespT> implements ErrorHandler<RespT> {
|
|||
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
|
||||
if (resp == null) {
|
||||
String msg =
|
||||
String.format(
|
||||
"Request Failed with unknown reason for region region [%s]", recv.getRegion());
|
||||
String.format("Request Failed with unknown reason for [%s]", regionHandler.getRegion());
|
||||
logger.warn(msg);
|
||||
return handleRequestError(backOffer, new GrpcException(msg));
|
||||
}
|
||||
|
||||
// Region error handling logic
|
||||
Errorpb.Error error = getRegionError(resp);
|
||||
Errorpb.Error error = regionHandler.getRegionError(resp);
|
||||
if (error != null) {
|
||||
if (error.hasNotLeader()) {
|
||||
// this error is reported from raftstore:
|
||||
// peer of current request is not leader, the following might be its causes:
|
||||
// 1. cache is outdated, region has changed its leader, can be solved by re-fetching from PD
|
||||
// 2. leader of current region is missing, need to wait and then fetch region info from PD
|
||||
long newStoreId = error.getNotLeader().getLeader().getStoreId();
|
||||
boolean retry;
|
||||
|
||||
// update Leader here
|
||||
logger.warn(
|
||||
String.format(
|
||||
"NotLeader Error with region id %d and store id %d, new store id %d",
|
||||
recv.getRegion().getId(), recv.getRegion().getLeader().getStoreId(), newStoreId));
|
||||
|
||||
BackOffFunction.BackOffFuncType backOffFuncType;
|
||||
// if there's currently no leader, we do not trigger update pd cache logic
|
||||
// since issuing store = NO_LEADER_STORE_ID requests to pd will definitely fail.
|
||||
if (newStoreId != NO_LEADER_STORE_ID) {
|
||||
// If update leader fails, we need to fetch new region info from pd,
|
||||
// and re-split key range for new region. Setting retry to false will
|
||||
// stop retry and enter handleCopResponse logic, which would use RegionMiss
|
||||
// backOff strategy to wait, fetch new region and re-split key range.
|
||||
// onNotLeader is only needed when updateLeader succeeds, thus switch
|
||||
// to a new store address.
|
||||
TiRegion newRegion = this.regionManager.updateLeader(recv.getRegion(), newStoreId);
|
||||
retry =
|
||||
newRegion != null
|
||||
&& recv.onNotLeader(this.regionManager.getStoreById(newStoreId), newRegion);
|
||||
|
||||
backOffFuncType = BackOffFunction.BackOffFuncType.BoUpdateLeader;
|
||||
} else {
|
||||
logger.info(
|
||||
String.format(
|
||||
"Received zero store id, from region %d try next time",
|
||||
recv.getRegion().getId()));
|
||||
|
||||
backOffFuncType = BackOffFunction.BackOffFuncType.BoRegionMiss;
|
||||
retry = false;
|
||||
}
|
||||
|
||||
if (!retry) {
|
||||
this.regionManager.invalidateRegion(recv.getRegion());
|
||||
}
|
||||
|
||||
backOffer.doBackOff(backOffFuncType, new GrpcException(error.toString()));
|
||||
|
||||
return retry;
|
||||
} else if (error.hasStoreNotMatch()) {
|
||||
// this error is reported from raftstore:
|
||||
// store_id requested at the moment is inconsistent with that expected
|
||||
// Solution:re-fetch from PD
|
||||
long storeId = recv.getRegion().getLeader().getStoreId();
|
||||
long actualStoreId = error.getStoreNotMatch().getActualStoreId();
|
||||
logger.warn(
|
||||
String.format(
|
||||
"Store Not Match happened with region id %d, store id %d, actual store id %d",
|
||||
recv.getRegion().getId(), storeId, actualStoreId));
|
||||
|
||||
this.regionManager.invalidateRegion(recv.getRegion());
|
||||
this.regionManager.invalidateStore(storeId);
|
||||
// recv.onStoreNotMatch(this.regionManager.getStoreById(storeId));
|
||||
// assume this is a low probability error, do not retry, just re-split the request by
|
||||
// throwing it out.
|
||||
return false;
|
||||
} else if (error.hasEpochNotMatch()) {
|
||||
// this error is reported from raftstore:
|
||||
// region has outdated version,please try later.
|
||||
logger.warn(String.format("Stale Epoch encountered for region [%s]", recv.getRegion()));
|
||||
this.regionManager.onRegionStale(recv.getRegion());
|
||||
return false;
|
||||
} else if (error.hasServerIsBusy()) {
|
||||
// this error is reported from kv:
|
||||
// will occur when write pressure is high. Please try later.
|
||||
logger.warn(
|
||||
String.format(
|
||||
"Server is busy for region [%s], reason: %s",
|
||||
recv.getRegion(), error.getServerIsBusy().getReason()));
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoServerBusy,
|
||||
new StatusRuntimeException(
|
||||
Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString())));
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
|
||||
return true;
|
||||
} else if (error.hasStaleCommand()) {
|
||||
// this error is reported from raftstore:
|
||||
// command outdated, please try later
|
||||
logger.warn(String.format("Stale command for region [%s]", recv.getRegion()));
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
|
||||
return true;
|
||||
} else if (error.hasRaftEntryTooLarge()) {
|
||||
logger.warn(String.format("Raft too large for region [%s]", recv.getRegion()));
|
||||
throw new StatusRuntimeException(
|
||||
Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString()));
|
||||
} else if (error.hasKeyNotInRegion()) {
|
||||
// this error is reported from raftstore:
|
||||
// key requested is not in current region
|
||||
// should not happen here.
|
||||
ByteString invalidKey = error.getKeyNotInRegion().getKey();
|
||||
logger.error(
|
||||
String.format(
|
||||
"Key not in region [%s] for key [%s], this error should not happen here.",
|
||||
recv.getRegion(), KeyUtils.formatBytesUTF8(invalidKey)));
|
||||
throw new StatusRuntimeException(Status.UNKNOWN.withDescription(error.toString()));
|
||||
}
|
||||
|
||||
logger.warn(String.format("Unknown error %s for region [%s]", error, recv.getRegion()));
|
||||
// For other errors, we only drop cache here.
|
||||
// Upper level may split this task.
|
||||
invalidateRegionStoreCache(recv.getRegion());
|
||||
// retry if raft proposal is dropped, it indicates the store is in the middle of transition
|
||||
if (error.getMessage().contains("Raft ProposalDropped")) {
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
|
||||
return true;
|
||||
}
|
||||
return regionHandler.handleRegionError(backOffer, error);
|
||||
} else {
|
||||
regionHandler.tryUpdateRegionStore();
|
||||
}
|
||||
|
||||
boolean retry = false;
|
||||
|
||||
// Key error handling logic
|
||||
Kvrpcpb.KeyError keyError = getKeyError.apply(resp);
|
||||
if (keyError != null) {
|
||||
try {
|
||||
Lock lock = AbstractLockResolverClient.extractLockFromKeyErr(keyError);
|
||||
resolveLock(backOffer, lock);
|
||||
retry = true;
|
||||
return true;
|
||||
} catch (KeyException e) {
|
||||
logger.warn("Unable to handle KeyExceptions other than LockException", e);
|
||||
}
|
||||
}
|
||||
return retry;
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean handleRequestError(BackOffer backOffer, Exception e) {
|
||||
regionManager.onRequestFail(recv.getRegion());
|
||||
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoTiKVRPC,
|
||||
new GrpcException(
|
||||
"send tikv request error: " + e.getMessage() + ", try next peer later", e));
|
||||
// TiKV maybe down, so do not retry in `callWithRetry`
|
||||
// should re-fetch the new leader from PD and send request to it
|
||||
return false;
|
||||
return regionHandler.handleRequestError(backOffer, e);
|
||||
}
|
||||
}
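KVErrorHandler now delegates all region handling to RegionErrorHandler and keeps only the lock-resolution logic. For contrast, a minimal sketch of implementing the same ErrorHandler contract directly; the interface shape is inferred from the overrides in this file, and a false return means "do not retry, let the caller re-split the work":

    import org.tikv.common.operation.ErrorHandler;
    import org.tikv.common.util.BackOffer;

    class NoRetryErrorHandler<RespT> implements ErrorHandler<RespT> {
      @Override
      public boolean handleResponseError(BackOffer backOffer, RespT resp) {
        return false; // never retry on response errors
      }

      @Override
      public boolean handleRequestError(BackOffer backOffer, Exception e) {
        return false; // never retry on transport errors
      }
    }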
|
||||
|
|
|
@ -48,7 +48,9 @@ public class PDErrorHandler<RespT> implements ErrorHandler<RespT> {
|
|||
@Override
|
||||
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
|
||||
if (resp == null) {
|
||||
return false;
|
||||
String msg = String.format("PD Request Failed with unknown reason");
|
||||
logger.warn(msg);
|
||||
return handleRequestError(backOffer, new GrpcException(msg));
|
||||
}
|
||||
PDError error = getError.apply(resp);
|
||||
if (error != null) {
|
||||
|
@ -56,7 +58,7 @@ public class PDErrorHandler<RespT> implements ErrorHandler<RespT> {
|
|||
case PD_ERROR:
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoPDRPC, new GrpcException(error.toString()));
|
||||
client.updateLeader();
|
||||
client.updateLeaderOrforwardFollower();
|
||||
return true;
|
||||
case REGION_PEER_NOT_ELECTED:
|
||||
logger.debug(error.getMessage());
|
||||
|
@ -73,6 +75,7 @@ public class PDErrorHandler<RespT> implements ErrorHandler<RespT> {
|
|||
@Override
|
||||
public boolean handleRequestError(BackOffer backOffer, Exception e) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoPDRPC, e);
|
||||
client.updateLeaderOrforwardFollower();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,206 @@
|
|||
package org.tikv.common.operation;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import io.grpc.Status;
|
||||
import io.grpc.StatusRuntimeException;
|
||||
import java.util.function.Function;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.codec.KeyUtils;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
import org.tikv.common.region.RegionErrorReceiver;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.BackOffFunction;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.kvproto.Errorpb;
|
||||
|
||||
public class RegionErrorHandler<RespT> implements ErrorHandler<RespT> {
|
||||
private static final Logger logger = LoggerFactory.getLogger(RegionErrorHandler.class);
|
||||
// if a store does not have leader currently, store id is set to 0
|
||||
private static final int NO_LEADER_STORE_ID = 0;
|
||||
private final Function<RespT, Errorpb.Error> getRegionError;
|
||||
private final RegionManager regionManager;
|
||||
private final RegionErrorReceiver recv;
|
||||
|
||||
public RegionErrorHandler(
|
||||
RegionManager regionManager,
|
||||
RegionErrorReceiver recv,
|
||||
Function<RespT, Errorpb.Error> getRegionError) {
|
||||
this.recv = recv;
|
||||
this.regionManager = regionManager;
|
||||
this.getRegionError = getRegionError;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean handleResponseError(BackOffer backOffer, RespT resp) {
|
||||
if (resp == null) {
|
||||
String msg = String.format("Request Failed with unknown reason for [%s]", recv.getRegion());
|
||||
return handleRequestError(backOffer, new GrpcException(msg));
|
||||
}
|
||||
// Region error handling logic
|
||||
Errorpb.Error error = getRegionError(resp);
|
||||
if (error != null) {
|
||||
return handleRegionError(backOffer, error);
|
||||
} else {
|
||||
tryUpdateRegionStore();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public void tryUpdateRegionStore() {
|
||||
recv.tryUpdateRegionStore();
|
||||
}
|
||||
|
||||
public boolean handleRegionError(BackOffer backOffer, Errorpb.Error error) {
|
||||
if (error.hasNotLeader()) {
|
||||
// this error is reported from raftstore:
|
||||
// peer of current request is not leader, the following might be its causes:
|
||||
// 1. cache is outdated, region has changed its leader, can be solved by re-fetching from PD
|
||||
// 2. leader of current region is missing, need to wait and then fetch region info from PD
|
||||
long newStoreId = error.getNotLeader().getLeader().getStoreId();
|
||||
boolean retry;
|
||||
|
||||
// update Leader here
|
||||
logger.warn(
|
||||
String.format(
|
||||
"NotLeader Error with region id %d and store id %d, new store id %d",
|
||||
recv.getRegion().getId(), recv.getRegion().getLeader().getStoreId(), newStoreId));
|
||||
|
||||
BackOffFunction.BackOffFuncType backOffFuncType;
|
||||
// if there's current no leader, we do not trigger update pd cache logic
|
||||
// since issuing store = NO_LEADER_STORE_ID requests to pd will definitely fail.
|
||||
if (newStoreId != NO_LEADER_STORE_ID) {
|
||||
// If update leader fails, we need to fetch new region info from pd,
|
||||
// and re-split key range for new region. Setting retry to false will
|
||||
// stop retry and enter handleCopResponse logic, which would use RegionMiss
|
||||
// backOff strategy to wait, fetch new region and re-split key range.
|
||||
// onNotLeader is only needed when updateLeader succeeds, thus switch
|
||||
// to a new store address.
|
||||
TiRegion newRegion = this.regionManager.updateLeader(recv.getRegion(), newStoreId);
|
||||
retry = newRegion != null && recv.onNotLeader(newRegion);
|
||||
|
||||
backOffFuncType = BackOffFunction.BackOffFuncType.BoUpdateLeader;
|
||||
} else {
|
||||
logger.info(
|
||||
String.format(
|
||||
"Received zero store id, from region %d try next time", recv.getRegion().getId()));
|
||||
|
||||
backOffFuncType = BackOffFunction.BackOffFuncType.BoRegionMiss;
|
||||
retry = false;
|
||||
}
|
||||
|
||||
if (!retry) {
|
||||
this.regionManager.invalidateRegion(recv.getRegion());
|
||||
}
|
||||
|
||||
backOffer.doBackOff(backOffFuncType, new GrpcException(error.toString()));
|
||||
|
||||
return retry;
|
||||
} else if (error.hasStoreNotMatch()) {
|
||||
// this error is reported from raftstore:
|
||||
// store_id requested at the moment is inconsistent with that expected
|
||||
// Solution:re-fetch from PD
|
||||
long storeId = recv.getRegion().getLeader().getStoreId();
|
||||
long actualStoreId = error.getStoreNotMatch().getActualStoreId();
|
||||
logger.warn(
|
||||
String.format(
|
||||
"Store Not Match happened with region id %d, store id %d, actual store id %d",
|
||||
recv.getRegion().getId(), storeId, actualStoreId));
|
||||
|
||||
this.regionManager.invalidateRegion(recv.getRegion());
|
||||
this.regionManager.invalidateStore(storeId);
|
||||
// recv.onStoreNotMatch(this.regionManager.getStoreById(storeId));
|
||||
// assume this is a low probability error, do not retry, just re-split the request by
|
||||
// throwing it out.
|
||||
return false;
|
||||
} else if (error.hasEpochNotMatch()) {
|
||||
// this error is reported from raftstore:
|
||||
// region has outdated version,please try later.
|
||||
logger.warn(String.format("Stale Epoch encountered for region [%s]", recv.getRegion()));
|
||||
this.regionManager.onRegionStale(recv.getRegion());
|
||||
return false;
|
||||
} else if (error.hasServerIsBusy()) {
|
||||
// this error is reported from kv:
|
||||
// will occur when write pressure is high. Please try later.
|
||||
logger.warn(
|
||||
String.format(
|
||||
"Server is busy for region [%s], reason: %s",
|
||||
recv.getRegion(), error.getServerIsBusy().getReason()));
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoServerBusy,
|
||||
new StatusRuntimeException(
|
||||
Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString())));
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
|
||||
return true;
|
||||
} else if (error.hasStaleCommand()) {
|
||||
// this error is reported from raftstore:
|
||||
// command outdated, please try later
|
||||
logger.warn(String.format("Stale command for region [%s]", recv.getRegion()));
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
|
||||
return true;
|
||||
} else if (error.hasRaftEntryTooLarge()) {
|
||||
logger.warn(String.format("Raft too large for region [%s]", recv.getRegion()));
|
||||
throw new StatusRuntimeException(
|
||||
Status.fromCode(Status.Code.UNAVAILABLE).withDescription(error.toString()));
|
||||
} else if (error.hasKeyNotInRegion()) {
|
||||
// this error is reported from raftstore:
|
||||
// key requested is not in current region
|
||||
// should not happen here.
|
||||
ByteString invalidKey = error.getKeyNotInRegion().getKey();
|
||||
logger.error(
|
||||
String.format(
|
||||
"Key not in region [%s] for key [%s], this error should not happen here.",
|
||||
recv.getRegion(), KeyUtils.formatBytesUTF8(invalidKey)));
|
||||
regionManager.clearRegionCache();
|
||||
throw new StatusRuntimeException(Status.UNKNOWN.withDescription(error.toString()));
|
||||
}
|
||||
|
||||
logger.warn(String.format("Unknown error %s for region [%s]", error, recv.getRegion()));
|
||||
// For other errors, we only drop cache here.
|
||||
// Upper level may split this task.
|
||||
invalidateRegionStoreCache(recv.getRegion());
|
||||
// retry if raft proposal is dropped, it indicates the store is in the middle of transition
|
||||
if (error.getMessage().contains("Raft ProposalDropped")) {
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoRegionMiss, new GrpcException(error.getMessage()));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean handleRequestError(BackOffer backOffer, Exception e) {
|
||||
if (recv.onStoreUnreachable()) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoTiKVRPC, e);
|
||||
return true;
|
||||
}
|
||||
|
||||
logger.warn("request failed because of: " + e.getMessage());
|
||||
backOffer.doBackOff(
|
||||
BackOffFunction.BackOffFuncType.BoTiKVRPC,
|
||||
new GrpcException(
|
||||
"send tikv request error: " + e.getMessage() + ", try next peer later", e));
|
||||
// TiKV maybe down, so do not retry in `callWithRetry`
|
||||
// should re-fetch the new leader from PD and send request to it
|
||||
return false;
|
||||
}
|
||||
|
||||
public Errorpb.Error getRegionError(RespT resp) {
|
||||
if (getRegionError != null) {
|
||||
return getRegionError.apply(resp);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public TiRegion getRegion() {
|
||||
return recv.getRegion();
|
||||
}
|
||||
|
||||
private void invalidateRegionStoreCache(TiRegion ctxRegion) {
|
||||
regionManager.invalidateRegion(ctxRegion);
|
||||
regionManager.invalidateStore(ctxRegion.getLeader().getStoreId());
|
||||
}
|
||||
}
|
|
@ -27,11 +27,11 @@ import org.tikv.common.key.Key;
|
|||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.RegionStoreClient.RegionStoreClientBuilder;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class ConcreteScanIterator extends ScanIterator {
|
||||
private final long version;
|
||||
|
@ -82,10 +82,10 @@ public class ConcreteScanIterator extends ScanIterator {
|
|||
|
||||
private ByteString resolveCurrentLock(Kvrpcpb.KvPair current) {
|
||||
logger.warn(String.format("resolve current key error %s", current.getError().toString()));
|
||||
Pair<TiRegion, Metapb.Store> pair =
|
||||
Pair<TiRegion, TiStore> pair =
|
||||
builder.getRegionManager().getRegionStorePairByKey(current.getKey());
|
||||
TiRegion region = pair.first;
|
||||
Metapb.Store store = pair.second;
|
||||
TiStore store = pair.second;
|
||||
BackOffer backOffer = ConcreteBackOffer.newGetBackOff();
|
||||
try (RegionStoreClient client = builder.build(region, store)) {
|
||||
return client.get(backOffer, current.getKey(), version);
|
||||
|
|
|
@ -27,7 +27,11 @@ import java.util.List;
|
|||
import org.tikv.common.TiSession;
|
||||
import org.tikv.common.codec.Codec.IntegerCodec;
|
||||
import org.tikv.common.codec.CodecDataInput;
|
||||
import org.tikv.common.columnar.*;
|
||||
import org.tikv.common.columnar.BatchedTiChunkColumnVector;
|
||||
import org.tikv.common.columnar.TiChunk;
|
||||
import org.tikv.common.columnar.TiChunkColumnVector;
|
||||
import org.tikv.common.columnar.TiColumnVector;
|
||||
import org.tikv.common.columnar.TiRowColumnVector;
|
||||
import org.tikv.common.columnar.datatypes.CHType;
|
||||
import org.tikv.common.meta.TiDAGRequest;
|
||||
import org.tikv.common.operation.SchemaInfer;
|
||||
|
|
|
@ -32,12 +32,12 @@ import org.tikv.common.meta.TiDAGRequest.PushDownType;
|
|||
import org.tikv.common.operation.SchemaInfer;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.region.TiStoreType;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.common.util.RangeSplitter;
|
||||
import org.tikv.kvproto.Coprocessor;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public abstract class DAGIterator<T>
|
||||
extends org.tikv.common.operation.iterator.CoprocessorIterator<T> {
|
||||
|
@ -204,7 +204,7 @@ public abstract class DAGIterator<T>
|
|||
}
|
||||
List<Coprocessor.KeyRange> ranges = task.getRanges();
|
||||
TiRegion region = task.getRegion();
|
||||
Metapb.Store store = task.getStore();
|
||||
TiStore store = task.getStore();
|
||||
|
||||
try {
|
||||
RegionStoreClient client =
|
||||
|
@ -246,7 +246,7 @@ public abstract class DAGIterator<T>
|
|||
private Iterator<SelectResponse> processByStreaming(RangeSplitter.RegionTask regionTask) {
|
||||
List<Coprocessor.KeyRange> ranges = regionTask.getRanges();
|
||||
TiRegion region = regionTask.getRegion();
|
||||
Metapb.Store store = regionTask.getStore();
|
||||
TiStore store = regionTask.getStore();
|
||||
|
||||
RegionStoreClient client;
|
||||
try {
|
||||
|
|
|
@ -78,26 +78,22 @@ public class RawScanIterator extends ScanIterator {
|
|||
endOfScan = true;
|
||||
return false;
|
||||
}
|
||||
// continue when cache is empty but not null
|
||||
while (currentCache != null && currentCache.isEmpty()) {
|
||||
if (cacheLoadFails()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return notEndOfScan();
|
||||
}
|
||||
|
||||
private Kvrpcpb.KvPair getCurrent() {
|
||||
if (isCacheDrained()) {
|
||||
return null;
|
||||
}
|
||||
--limit;
|
||||
return currentCache.get(index++);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Kvrpcpb.KvPair next() {
|
||||
Kvrpcpb.KvPair kv;
|
||||
// continue when cache is empty but not null
|
||||
for (kv = getCurrent(); currentCache != null && kv == null; kv = getCurrent()) {
|
||||
if (cacheLoadFails()) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return kv;
|
||||
return getCurrent();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,11 +50,8 @@ public abstract class ScanIterator implements Iterator<Kvrpcpb.KvPair> {
|
|||
int limit,
|
||||
boolean keyOnly) {
|
||||
this.startKey = requireNonNull(startKey, "start key is null");
|
||||
if (startKey.isEmpty()) {
|
||||
throw new IllegalArgumentException("start key cannot be empty");
|
||||
}
|
||||
this.endKey = Key.toRawKey(requireNonNull(endKey, "end key is null"));
|
||||
this.hasEndKey = !endKey.equals(ByteString.EMPTY);
|
||||
this.hasEndKey = !endKey.isEmpty();
|
||||
this.limit = limit;
|
||||
this.keyOnly = keyOnly;
|
||||
this.conf = conf;
|
||||
|
@ -74,7 +71,7 @@ public abstract class ScanIterator implements Iterator<Kvrpcpb.KvPair> {
|
|||
if (endOfScan || processingLastBatch) {
|
||||
return true;
|
||||
}
|
||||
if (startKey == null || startKey.isEmpty()) {
|
||||
if (startKey == null) {
|
||||
return true;
|
||||
}
|
||||
try {
|
||||
|
@ -107,7 +104,8 @@ public abstract class ScanIterator implements Iterator<Kvrpcpb.KvPair> {
|
|||
startKey = lastKey.next().toByteString();
|
||||
}
|
||||
// notify last batch if lastKey is greater than or equal to endKey
|
||||
if (hasEndKey && lastKey.compareTo(endKey) >= 0) {
|
||||
// if startKey is empty, it indicates +∞
|
||||
if (hasEndKey && lastKey.compareTo(endKey) >= 0 || startKey.isEmpty()) {
|
||||
processingLastBatch = true;
|
||||
startKey = null;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@ package org.tikv.common.policy;
|
|||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import io.grpc.Status;
|
||||
import io.prometheus.client.Counter;
|
||||
import io.prometheus.client.Histogram;
|
||||
import java.util.concurrent.Callable;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
|
@ -32,6 +33,12 @@ public abstract class RetryPolicy<RespT> {
|
|||
.help("grpc request latency.")
|
||||
.labelNames("type")
|
||||
.register();
|
||||
public static final Counter GRPC_REQUEST_RETRY_NUM =
|
||||
Counter.build()
|
||||
.name("client_java_grpc_requests_retry_num")
|
||||
.help("grpc request retry num.")
|
||||
.labelNames("type")
|
||||
.register();
|
||||
|
||||
// handles PD and TiKV's error.
|
||||
private ErrorHandler<RespT> handler;
|
||||
|
@ -70,7 +77,10 @@ public abstract class RetryPolicy<RespT> {
|
|||
// Handle request call error
|
||||
boolean retry = handler.handleRequestError(backOffer, e);
|
||||
if (retry) {
|
||||
GRPC_REQUEST_RETRY_NUM.labels(methodName).inc();
|
||||
continue;
|
||||
} else {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -78,7 +88,7 @@ public abstract class RetryPolicy<RespT> {
|
|||
if (handler != null) {
|
||||
boolean retry = handler.handleResponseError(backOffer, result);
|
||||
if (retry) {
|
||||
// add retry counter
|
||||
GRPC_REQUEST_RETRY_NUM.labels(methodName).inc();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,24 +21,38 @@ import static com.google.common.base.Preconditions.checkArgument;
|
|||
import static com.google.common.base.Preconditions.checkNotNull;
|
||||
|
||||
import io.grpc.ManagedChannel;
|
||||
import io.grpc.Metadata;
|
||||
import io.grpc.stub.MetadataUtils;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.AbstractGRPCClient;
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.TikvGrpc;
|
||||
|
||||
public abstract class AbstractRegionStoreClient
|
||||
extends AbstractGRPCClient<TikvGrpc.TikvBlockingStub, TikvGrpc.TikvStub>
|
||||
implements RegionErrorReceiver {
|
||||
private static final Logger logger = LoggerFactory.getLogger(AbstractRegionStoreClient.class);
|
||||
|
||||
protected final RegionManager regionManager;
|
||||
protected TiRegion region;
|
||||
protected TiStore targetStore;
|
||||
protected TiStore originStore;
|
||||
private long retryForwardTimes;
|
||||
private long retryLeaderTimes;
|
||||
private Metapb.Peer candidateLeader;
|
||||
|
||||
protected AbstractRegionStoreClient(
|
||||
TiConfiguration conf,
|
||||
TiRegion region,
|
||||
TiStore store,
|
||||
ChannelFactory channelFactory,
|
||||
TikvGrpc.TikvBlockingStub blockingStub,
|
||||
TikvGrpc.TikvStub asyncStub,
|
||||
|
@ -49,8 +63,19 @@ public abstract class AbstractRegionStoreClient
|
|||
checkArgument(region.getLeader() != null, "Leader Peer is null");
|
||||
this.region = region;
|
||||
this.regionManager = regionManager;
|
||||
this.targetStore = store;
|
||||
this.originStore = null;
|
||||
this.candidateLeader = null;
|
||||
this.retryForwardTimes = 0;
|
||||
this.retryLeaderTimes = 0;
|
||||
if (this.targetStore.getProxyStore() != null) {
|
||||
this.timeout = conf.getForwardTimeout();
|
||||
} else if (!this.targetStore.isReachable() && !this.targetStore.canForwardFirst()) {
|
||||
onStoreUnreachable();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public TiRegion getRegion() {
|
||||
return region;
|
||||
}
|
||||
|
@ -71,43 +96,218 @@ public abstract class AbstractRegionStoreClient
|
|||
/**
|
||||
* onNotLeader deals with NotLeaderError and returns whether re-splitting key range is needed
|
||||
*
|
||||
* @param newStore the new store presented by NotLeader Error
|
||||
* @param newRegion the new region presented by NotLeader Error
|
||||
* @return false when re-split is needed.
|
||||
*/
|
||||
@Override
|
||||
public boolean onNotLeader(Metapb.Store newStore, TiRegion newRegion) {
|
||||
public boolean onNotLeader(TiRegion newRegion) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(region + ", new leader = " + newStore.getId());
|
||||
logger.debug(region + ", new leader = " + newRegion.getLeader().getStoreId());
|
||||
}
|
||||
// When switch leader fails or the region changed its region epoch,
|
||||
// it would be necessary to re-split task's key range for new region.
|
||||
if (!region.getRegionEpoch().equals(newRegion.getRegionEpoch())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If we try one peer but find the leader has not changed, we do not need try other peers.
|
||||
if (candidateLeader != null
|
||||
&& region.getLeader().getStoreId() == newRegion.getLeader().getStoreId()) {
|
||||
retryLeaderTimes = newRegion.getFollowerList().size();
|
||||
originStore = null;
|
||||
}
|
||||
candidateLeader = null;
|
||||
region = newRegion;
|
||||
String addressStr = regionManager.getStoreById(region.getLeader().getStoreId()).getAddress();
|
||||
ManagedChannel channel =
|
||||
channelFactory.getChannel(addressStr, regionManager.getPDClient().getHostMapping());
|
||||
blockingStub = TikvGrpc.newBlockingStub(channel);
|
||||
asyncStub = TikvGrpc.newStub(channel);
|
||||
targetStore = regionManager.getStoreById(region.getLeader().getStoreId());
|
||||
updateClientStub();
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStoreNotMatch(Metapb.Store store) {
|
||||
String addressStr = store.getAddress();
|
||||
public boolean onStoreUnreachable() {
|
||||
if (!targetStore.isValid()) {
|
||||
logger.warn(
|
||||
String.format("store [%d] has been invalid", region.getId(), targetStore.getId()));
|
||||
targetStore = regionManager.getStoreById(targetStore.getId());
|
||||
updateClientStub();
|
||||
return true;
|
||||
}
|
||||
|
||||
if (targetStore.getProxyStore() == null) {
|
||||
if (targetStore.isReachable()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// If this store has failed to forward request too many times, we shall try other peer at first
|
||||
// so that we can
|
||||
// reduce the latency cost by fail requests.
|
||||
if (targetStore.canForwardFirst()) {
|
||||
if (conf.getEnableGrpcForward() && retryForwardTimes <= region.getFollowerList().size()) {
|
||||
return retryOtherStoreByProxyForward();
|
||||
}
|
||||
if (retryOtherStoreLeader()) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
if (retryOtherStoreLeader()) {
|
||||
return true;
|
||||
}
|
||||
if (conf.getEnableGrpcForward() && retryForwardTimes <= region.getFollowerList().size()) {
|
||||
return retryOtherStoreByProxyForward();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
logger.warn(
|
||||
String.format(
|
||||
"retry time exceed for region[%d], invalid this region[%d]",
|
||||
region.getId(), targetStore.getId()));
|
||||
regionManager.onRequestFail(region);
|
||||
return false;
|
||||
}
|
||||
|
||||
protected Kvrpcpb.Context makeContext(TiStoreType storeType) {
|
||||
if (candidateLeader != null && storeType == TiStoreType.TiKV) {
|
||||
return region.getReplicaContext(candidateLeader, java.util.Collections.emptySet());
|
||||
} else {
|
||||
return region.getReplicaContext(java.util.Collections.emptySet(), storeType);
|
||||
}
|
||||
}
|
||||
|
||||
protected Kvrpcpb.Context makeContext(Set<Long> resolvedLocks, TiStoreType storeType) {
|
||||
if (candidateLeader != null && storeType == TiStoreType.TiKV) {
|
||||
return region.getReplicaContext(candidateLeader, resolvedLocks);
|
||||
} else {
|
||||
return region.getReplicaContext(resolvedLocks, storeType);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tryUpdateRegionStore() {
|
||||
if (originStore != null) {
|
||||
if (originStore.getId() == targetStore.getId()) {
|
||||
logger.warn(
|
||||
String.format(
|
||||
"update store [%s] by proxy-store [%s]",
|
||||
targetStore.getStore().getAddress(), targetStore.getProxyStore().getAddress()));
|
||||
// We do not need to mark the store can-forward, because if one store has grpc forward
|
||||
// successfully, it will
|
||||
// create a new store object, which is can-forward.
|
||||
regionManager.updateStore(originStore, targetStore);
|
||||
} else {
|
||||
// If we try to forward request to leader by follower failed, it means that the store of old
|
||||
// leader may be
|
||||
// unavailable but the new leader has not been report to PD. So we can ban this store for a
|
||||
// short time to
|
||||
// avoid too many request try forward rather than try other peer.
|
||||
originStore.forwardFail();
|
||||
}
|
||||
}
|
||||
if (candidateLeader != null) {
|
||||
logger.warn(
|
||||
String.format(
|
||||
"update leader to store [%d] for region[%d]",
|
||||
candidateLeader.getStoreId(), region.getId()));
|
||||
this.regionManager.updateLeader(region, candidateLeader.getStoreId());
|
||||
}
|
||||
}
|
||||
|
||||
private boolean retryOtherStoreLeader() {
|
||||
List<Metapb.Peer> peers = region.getFollowerList();
|
||||
if (retryLeaderTimes >= peers.size()) {
|
||||
return false;
|
||||
}
|
||||
retryLeaderTimes += 1;
|
||||
boolean hasVisitedStore = false;
|
||||
for (Metapb.Peer cur : peers) {
|
||||
if (candidateLeader == null || hasVisitedStore) {
|
||||
TiStore store = regionManager.getStoreById(cur.getStoreId());
|
||||
if (store != null && store.isReachable()) {
|
||||
targetStore = store;
|
||||
candidateLeader = cur;
|
||||
logger.warn(
|
||||
String.format(
|
||||
"try store [%d],peer[%d] for region[%d], which may be new leader",
|
||||
targetStore.getId(), candidateLeader.getId(), region.getId()));
|
||||
updateClientStub();
|
||||
return true;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
} else if (candidateLeader.getId() == cur.getId()) {
|
||||
hasVisitedStore = true;
|
||||
}
|
||||
}
|
||||
candidateLeader = null;
|
||||
retryLeaderTimes = peers.size();
|
||||
return false;
|
||||
}
|
||||
|
||||
private void updateClientStub() {
|
||||
String addressStr = targetStore.getStore().getAddress();
|
||||
if (targetStore.getProxyStore() != null) {
|
||||
addressStr = targetStore.getProxyStore().getAddress();
|
||||
}
|
||||
ManagedChannel channel =
|
||||
channelFactory.getChannel(addressStr, regionManager.getPDClient().getHostMapping());
|
||||
blockingStub = TikvGrpc.newBlockingStub(channel);
|
||||
asyncStub = TikvGrpc.newStub(channel);
|
||||
if (region.getLeader().getStoreId() != store.getId()) {
|
||||
logger.warn(
|
||||
"store_not_match may occur? "
|
||||
+ region
|
||||
+ ", original store = "
|
||||
+ store.getId()
|
||||
+ " address = "
|
||||
+ addressStr);
|
||||
if (targetStore.getProxyStore() != null) {
|
||||
Metadata header = new Metadata();
|
||||
header.put(TiConfiguration.FORWARD_META_DATA_KEY, targetStore.getStore().getAddress());
|
||||
blockingStub = MetadataUtils.attachHeaders(blockingStub, header);
|
||||
asyncStub = MetadataUtils.attachHeaders(asyncStub, header);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean retryOtherStoreByProxyForward() {
|
||||
TiStore proxyStore = switchProxyStore();
|
||||
if (proxyStore == null) {
|
||||
logger.warn(
|
||||
String.format(
|
||||
"no forward store can be selected for store [%s] and region[%d]",
|
||||
targetStore.getStore().getAddress(), region.getId()));
|
||||
return false;
|
||||
}
|
||||
if (originStore == null) {
|
||||
originStore = targetStore;
|
||||
if (this.targetStore.getProxyStore() != null) {
|
||||
this.timeout = conf.getForwardTimeout();
|
||||
}
|
||||
}
|
||||
targetStore = proxyStore;
|
||||
retryForwardTimes += 1;
|
||||
updateClientStub();
|
||||
logger.warn(
|
||||
String.format(
|
||||
"forward request to store [%s] by store [%s] for region[%d]",
|
||||
targetStore.getStore().getAddress(),
|
||||
targetStore.getProxyStore().getAddress(),
|
||||
region.getId()));
|
||||
return true;
|
||||
}
|
||||
|
||||
private TiStore switchProxyStore() {
|
||||
boolean hasVisitedStore = false;
|
||||
List<Metapb.Peer> peers = region.getFollowerList();
|
||||
if (peers.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
Metapb.Store proxyStore = targetStore.getProxyStore();
|
||||
if (proxyStore == null || peers.get(peers.size() - 1).getStoreId() == proxyStore.getId()) {
|
||||
hasVisitedStore = true;
|
||||
}
|
||||
for (Metapb.Peer peer : peers) {
|
||||
if (hasVisitedStore) {
|
||||
TiStore store = regionManager.getStoreById(peer.getStoreId());
|
||||
if (store.isReachable()) {
|
||||
return targetStore.withProxy(store.getStore());
|
||||
}
|
||||
} else if (peer.getStoreId() == proxyStore.getId()) {
|
||||
hasVisitedStore = true;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,157 @@
|
|||
package org.tikv.common.region;
|
||||
|
||||
import static org.tikv.common.codec.KeyUtils.formatBytesUTF8;
|
||||
import static org.tikv.common.util.KeyRangeUtils.makeRange;
|
||||
|
||||
import com.google.common.collect.RangeMap;
|
||||
import com.google.common.collect.TreeRangeMap;
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.key.Key;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
|
||||
public class RegionCache {
|
||||
private static final Logger logger = LoggerFactory.getLogger(RegionCache.class);
|
||||
|
||||
private final Map<Long, TiRegion> regionCache;
|
||||
private final Map<Long, TiStore> storeCache;
|
||||
private final RangeMap<Key, Long> keyToRegionIdCache;
|
||||
|
||||
public RegionCache() {
|
||||
regionCache = new HashMap<>();
|
||||
storeCache = new HashMap<>();
|
||||
|
||||
keyToRegionIdCache = TreeRangeMap.create();
|
||||
}
|
||||
|
||||
public synchronized TiRegion getRegionByKey(ByteString key, BackOffer backOffer) {
|
||||
Long regionId;
|
||||
if (key.isEmpty()) {
|
||||
// if key is empty, it must be the start key.
|
||||
regionId = keyToRegionIdCache.get(Key.toRawKey(key, true));
|
||||
} else {
|
||||
regionId = keyToRegionIdCache.get(Key.toRawKey(key));
|
||||
}
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(
|
||||
String.format("getRegionByKey key[%s] -> ID[%s]", formatBytesUTF8(key), regionId));
|
||||
}
|
||||
|
||||
if (regionId == null) {
|
||||
return null;
|
||||
}
|
||||
TiRegion region;
|
||||
region = regionCache.get(regionId);
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("getRegionByKey ID[%s] -> Region[%s]", regionId, region));
|
||||
}
|
||||
return region;
|
||||
}
|
||||
|
||||
public synchronized TiRegion putRegion(TiRegion region) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("putRegion: " + region);
|
||||
}
|
||||
TiRegion oldRegion = regionCache.get(region.getId());
|
||||
if (oldRegion != null) {
|
||||
if (oldRegion.getMeta().equals(region.getMeta())) {
|
||||
return oldRegion;
|
||||
} else {
|
||||
invalidateRegion(oldRegion);
|
||||
}
|
||||
}
|
||||
regionCache.put(region.getId(), region);
|
||||
keyToRegionIdCache.put(makeRange(region.getStartKey(), region.getEndKey()), region.getId());
|
||||
return region;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
public synchronized TiRegion getRegionById(long regionId) {
|
||||
TiRegion region = regionCache.get(regionId);
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("getRegionByKey ID[%s] -> Region[%s]", regionId, region));
|
||||
}
|
||||
return region;
|
||||
}
|
||||
|
||||
/** Removes region associated with regionId from regionCache. */
|
||||
public synchronized void invalidateRegion(TiRegion region) {
|
||||
try {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("invalidateRegion ID[%s]", region.getId()));
|
||||
}
|
||||
TiRegion oldRegion = regionCache.get(region.getId());
|
||||
if (oldRegion != null && oldRegion == region) {
|
||||
keyToRegionIdCache.remove(makeRange(region.getStartKey(), region.getEndKey()));
|
||||
regionCache.remove(region.getId());
|
||||
}
|
||||
} catch (Exception ignore) {
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized boolean updateRegion(TiRegion expected, TiRegion region) {
|
||||
try {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("invalidateRegion ID[%s]", region.getId()));
|
||||
}
|
||||
TiRegion oldRegion = regionCache.get(region.getId());
|
||||
if (!expected.getMeta().equals(oldRegion.getMeta())) {
|
||||
return false;
|
||||
} else {
|
||||
if (oldRegion != null) {
|
||||
keyToRegionIdCache.remove(makeRange(oldRegion.getStartKey(), oldRegion.getEndKey()));
|
||||
}
|
||||
regionCache.put(region.getId(), region);
|
||||
keyToRegionIdCache.put(makeRange(region.getStartKey(), region.getEndKey()), region.getId());
|
||||
return true;
|
||||
}
|
||||
} catch (Exception ignore) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized boolean updateStore(TiStore oldStore, TiStore newStore) {
|
||||
if (!newStore.isValid()) {
|
||||
return false;
|
||||
}
|
||||
TiStore originStore = storeCache.get(oldStore.getId());
|
||||
if (originStore == oldStore) {
|
||||
storeCache.put(newStore.getId(), newStore);
|
||||
oldStore.markInvalid();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public synchronized void invalidateStore(long storeId) {
|
||||
TiStore store = storeCache.remove(storeId);
|
||||
if (store != null) {
|
||||
store.markInvalid();
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized TiStore getStoreById(long id) {
|
||||
return storeCache.get(id);
|
||||
}
|
||||
|
||||
public synchronized boolean putStore(long id, TiStore store) {
|
||||
TiStore oldStore = storeCache.get(id);
|
||||
if (oldStore != null) {
|
||||
if (oldStore.equals(store)) {
|
||||
return false;
|
||||
} else {
|
||||
oldStore.markInvalid();
|
||||
}
|
||||
}
|
||||
storeCache.put(id, store);
|
||||
return true;
|
||||
}
|
||||
|
||||
public synchronized void clearAll() {
|
||||
keyToRegionIdCache.clear();
|
||||
regionCache.clear();
|
||||
}
|
||||
}
|
|
@ -17,12 +17,13 @@
|
|||
|
||||
package org.tikv.common.region;
|
||||
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
|
||||
public interface RegionErrorReceiver {
|
||||
boolean onNotLeader(Store store, TiRegion region);
|
||||
boolean onNotLeader(TiRegion region);
|
||||
|
||||
void onStoreNotMatch(Store store);
|
||||
/// return whether we need to retry this request.
|
||||
boolean onStoreUnreachable();
|
||||
|
||||
void tryUpdateRegionStore();
|
||||
|
||||
TiRegion getRegion();
|
||||
}
|
||||
|
|
|
@ -18,69 +18,75 @@
|
|||
package org.tikv.common.region;
|
||||
|
||||
import static org.tikv.common.codec.KeyUtils.formatBytesUTF8;
|
||||
import static org.tikv.common.util.KeyRangeUtils.makeRange;
|
||||
|
||||
import com.google.common.collect.RangeMap;
|
||||
import com.google.common.collect.TreeRangeMap;
|
||||
import com.google.protobuf.ByteString;
|
||||
import io.prometheus.client.Histogram;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.ReadOnlyPDClient;
|
||||
import org.tikv.common.event.CacheInvalidateEvent;
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
import org.tikv.common.exception.TiClientInternalException;
|
||||
import org.tikv.common.key.Key;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.Metapb.Peer;
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
import org.tikv.kvproto.Metapb.StoreState;
|
||||
|
||||
@SuppressWarnings("UnstableApiUsage")
|
||||
public class RegionManager {
|
||||
private static final Logger logger = LoggerFactory.getLogger(RegionManager.class);
|
||||
// TODO: the region cache logic need rewrite.
|
||||
// https://github.com/pingcap/tispark/issues/1170
|
||||
private final RegionCache cache;
|
||||
private final boolean isReplicaRead;
|
||||
|
||||
private final Function<CacheInvalidateEvent, Void> cacheInvalidateCallback;
|
||||
|
||||
public static final Histogram GET_REGION_BY_KEY_REQUEST_LATENCY =
|
||||
Histogram.build()
|
||||
.name("client_java_get_region_by_requests_latency")
|
||||
.help("getRegionByKey request latency.")
|
||||
.register();
|
||||
|
||||
// To avoid double retrieval, we used the async version of grpc
|
||||
// When rpc not returned, instead of call again, it wait for previous one done
|
||||
// TODO: the region cache logic need rewrite.
|
||||
// https://github.com/pingcap/tispark/issues/1170
|
||||
private final RegionCache cache;
|
||||
private final ReadOnlyPDClient pdClient;
|
||||
private final TiConfiguration conf;
|
||||
private final ScheduledExecutorService executor;
|
||||
private final StoreHealthyChecker storeChecker;
|
||||
|
||||
public RegionManager(
|
||||
ReadOnlyPDClient pdClient, Function<CacheInvalidateEvent, Void> cacheInvalidateCallback) {
|
||||
this.cache = new RegionCache(pdClient);
|
||||
this.isReplicaRead = pdClient.isReplicaRead();
|
||||
this.cacheInvalidateCallback = cacheInvalidateCallback;
|
||||
TiConfiguration conf, ReadOnlyPDClient pdClient, ChannelFactory channelFactory) {
|
||||
this.cache = new RegionCache();
|
||||
this.pdClient = pdClient;
|
||||
this.conf = conf;
|
||||
long period = conf.getHealthCheckPeriodDuration();
|
||||
StoreHealthyChecker storeChecker =
|
||||
new StoreHealthyChecker(
|
||||
channelFactory, pdClient, this.cache, conf.getGrpcHealthCheckTimeout());
|
||||
this.storeChecker = storeChecker;
|
||||
this.executor = Executors.newScheduledThreadPool(1);
|
||||
this.executor.scheduleAtFixedRate(storeChecker, period, period, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
public RegionManager(ReadOnlyPDClient pdClient) {
|
||||
this.cache = new RegionCache(pdClient);
|
||||
this.isReplicaRead = pdClient.isReplicaRead();
|
||||
this.cacheInvalidateCallback = null;
|
||||
public RegionManager(TiConfiguration conf, ReadOnlyPDClient pdClient) {
|
||||
this.cache = new RegionCache();
|
||||
this.pdClient = pdClient;
|
||||
this.conf = conf;
|
||||
this.storeChecker = null;
|
||||
this.executor = null;
|
||||
}
|
||||
|
||||
public Function<CacheInvalidateEvent, Void> getCacheInvalidateCallback() {
|
||||
return cacheInvalidateCallback;
|
||||
public synchronized void close() {
|
||||
if (this.executor != null) {
|
||||
this.executor.shutdownNow();
|
||||
}
|
||||
}
|
||||
|
||||
public ReadOnlyPDClient getPDClient() {
|
||||
return this.cache.pdClient;
|
||||
return this.pdClient;
|
||||
}
|
||||
|
||||
public TiRegion getRegionByKey(ByteString key) {
|
||||
|
@ -88,7 +94,20 @@ public class RegionManager {
|
|||
}
|
||||
|
||||
public TiRegion getRegionByKey(ByteString key, BackOffer backOffer) {
|
||||
return cache.getRegionByKey(key, backOffer);
|
||||
Histogram.Timer requestTimer = GET_REGION_BY_KEY_REQUEST_LATENCY.startTimer();
|
||||
TiRegion region = cache.getRegionByKey(key, backOffer);
|
||||
try {
|
||||
if (region == null) {
|
||||
logger.debug("Key not found in keyToRegionIdCache:" + formatBytesUTF8(key));
|
||||
Pair<Metapb.Region, Metapb.Peer> regionAndLeader = pdClient.getRegionByKey(backOffer, key);
|
||||
region =
|
||||
cache.putRegion(createRegion(regionAndLeader.first, regionAndLeader.second, backOffer));
|
||||
}
|
||||
} finally {
|
||||
requestTimer.observeDuration();
|
||||
}
|
||||
|
||||
return region;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
|
@ -99,45 +118,48 @@ public class RegionManager {
|
|||
// Consider region A, B. After merge of (A, B) -> A, region ID B does not exist.
|
||||
// This request is unrecoverable.
|
||||
public TiRegion getRegionById(long regionId) {
|
||||
return cache.getRegionById(ConcreteBackOffer.newGetBackOff(), regionId);
|
||||
BackOffer backOffer = ConcreteBackOffer.newGetBackOff();
|
||||
TiRegion region = cache.getRegionById(regionId);
|
||||
if (region == null) {
|
||||
Pair<Metapb.Region, Metapb.Peer> regionAndLeader =
|
||||
pdClient.getRegionByID(backOffer, regionId);
|
||||
region = createRegion(regionAndLeader.first, regionAndLeader.second, backOffer);
|
||||
return cache.putRegion(region);
|
||||
}
|
||||
return region;
|
||||
}
|
||||
|
||||
public Pair<TiRegion, Store> getRegionStorePairByKey(ByteString key, BackOffer backOffer) {
|
||||
public Pair<TiRegion, TiStore> getRegionStorePairByKey(ByteString key, BackOffer backOffer) {
|
||||
return getRegionStorePairByKey(key, TiStoreType.TiKV, backOffer);
|
||||
}
|
||||
|
||||
public Pair<TiRegion, Store> getRegionStorePairByKey(ByteString key) {
|
||||
public Pair<TiRegion, TiStore> getRegionStorePairByKey(ByteString key) {
|
||||
return getRegionStorePairByKey(key, TiStoreType.TiKV);
|
||||
}
|
||||
|
||||
public Pair<TiRegion, Store> getRegionStorePairByKey(ByteString key, TiStoreType storeType) {
|
||||
public Pair<TiRegion, TiStore> getRegionStorePairByKey(ByteString key, TiStoreType storeType) {
|
||||
return getRegionStorePairByKey(key, storeType, ConcreteBackOffer.newGetBackOff());
|
||||
}
|
||||
|
||||
public Pair<TiRegion, Store> getRegionStorePairByKey(
|
||||
public Pair<TiRegion, TiStore> getRegionStorePairByKey(
|
||||
ByteString key, TiStoreType storeType, BackOffer backOffer) {
|
||||
TiRegion region = cache.getRegionByKey(key, backOffer);
|
||||
if (region == null) {
|
||||
throw new TiClientInternalException("Region not exist for key:" + formatBytesUTF8(key));
|
||||
}
|
||||
TiRegion region = getRegionByKey(key, backOffer);
|
||||
if (!region.isValid()) {
|
||||
throw new TiClientInternalException("Region invalid: " + region.toString());
|
||||
}
|
||||
|
||||
Store store = null;
|
||||
TiStore store = null;
|
||||
if (storeType == TiStoreType.TiKV) {
|
||||
if (isReplicaRead) {
|
||||
Peer peer = region.getCurrentFollower();
|
||||
store = cache.getStoreById(peer.getStoreId(), backOffer);
|
||||
} else {
|
||||
Peer leader = region.getLeader();
|
||||
store = cache.getStoreById(leader.getStoreId(), backOffer);
|
||||
Peer peer = region.getCurrentReplica();
|
||||
store = getStoreById(peer.getStoreId(), backOffer);
|
||||
if (store == null) {
|
||||
cache.clearAll();
|
||||
}
|
||||
} else {
|
||||
outerLoop:
|
||||
for (Peer peer : region.getLearnerList()) {
|
||||
Store s = getStoreById(peer.getStoreId(), backOffer);
|
||||
for (Metapb.StoreLabel label : s.getLabelsList()) {
|
||||
TiStore s = getStoreById(peer.getStoreId(), backOffer);
|
||||
for (Metapb.StoreLabel label : s.getStore().getLabelsList()) {
|
||||
if (label.getKey().equals(storeType.getLabelKey())
|
||||
&& label.getValue().equals(storeType.getLabelValue())) {
|
||||
store = s;
|
||||
|
@ -159,48 +181,74 @@ public class RegionManager {
|
|||
return Pair.create(region, store);
|
||||
}
|
||||
|
||||
public Store getStoreById(long id) {
|
||||
return getStoreById(id, ConcreteBackOffer.newGetBackOff());
|
||||
private TiRegion createRegion(Metapb.Region region, Metapb.Peer leader, BackOffer backOffer) {
|
||||
List<Metapb.Peer> peers = region.getPeersList();
|
||||
List<TiStore> stores = getRegionStore(peers, backOffer);
|
||||
return new TiRegion(conf, region, leader, peers, stores);
|
||||
}
|
||||
|
||||
public Store getStoreById(long id, BackOffer backOffer) {
|
||||
return cache.getStoreById(id, backOffer);
|
||||
private List<TiStore> getRegionStore(List<Metapb.Peer> peers, BackOffer backOffer) {
|
||||
return peers.stream().map(p -> getStoreById(p.getStoreId())).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public TiStore getStoreById(long id, BackOffer backOffer) {
|
||||
try {
|
||||
TiStore store = cache.getStoreById(id);
|
||||
if (store == null) {
|
||||
store = new TiStore(pdClient.getStore(backOffer, id));
|
||||
}
|
||||
if (store.getStore().getState().equals(StoreState.Tombstone)) {
|
||||
return null;
|
||||
}
|
||||
if (cache.putStore(id, store) && storeChecker != null) {
|
||||
storeChecker.scheduleStoreHealthCheck(store);
|
||||
}
|
||||
return store;
|
||||
} catch (Exception e) {
|
||||
throw new GrpcException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public TiStore getStoreById(long id) {
|
||||
return getStoreById(id, ConcreteBackOffer.newGetBackOff());
|
||||
}
|
||||
|
||||
public void onRegionStale(TiRegion region) {
|
||||
cache.invalidateRegion(region);
|
||||
}
|
||||
|
||||
public synchronized TiRegion updateLeader(TiRegion region, long storeId) {
|
||||
TiRegion r = cache.getRegionFromCache(region.getId());
|
||||
if (r != null) {
|
||||
if (r.getLeader().getStoreId() == storeId) {
|
||||
return r;
|
||||
}
|
||||
TiRegion newRegion = r.switchPeer(storeId);
|
||||
if (newRegion != null) {
|
||||
cache.putRegion(newRegion);
|
||||
return newRegion;
|
||||
}
|
||||
// failed to switch leader, possibly region is outdated, we need to drop region cache from
|
||||
// regionCache
|
||||
logger.warn("Cannot find peer when updating leader (" + region.getId() + "," + storeId + ")");
|
||||
public TiRegion updateLeader(TiRegion region, long storeId) {
|
||||
if (region.getLeader().getStoreId() == storeId) {
|
||||
return region;
|
||||
}
|
||||
TiRegion newRegion = region.switchPeer(storeId);
|
||||
if (cache.updateRegion(region, newRegion)) {
|
||||
return newRegion;
|
||||
}
|
||||
// failed to switch leader, possibly region is outdated, we need to drop region cache from
|
||||
// regionCache
|
||||
logger.warn("Cannot find peer when updating leader (" + region.getId() + "," + storeId + ")");
|
||||
return null;
|
||||
}
|
||||
|
||||
public synchronized void updateStore(TiStore oldStore, TiStore newStore) {
|
||||
if (cache.updateStore(oldStore, newStore) && storeChecker != null) {
|
||||
storeChecker.scheduleStoreHealthCheck(newStore);
|
||||
}
|
||||
}
|
||||
|
||||
/** Clears all cache when some unexpected error occurs. */
|
||||
public void clearRegionCache() {
|
||||
cache.clearAll();
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears all cache when a TiKV server does not respond
|
||||
*
|
||||
* @param region region
|
||||
*/
|
||||
public void onRequestFail(TiRegion region) {
|
||||
onRequestFail(region, region.getLeader().getStoreId());
|
||||
}
|
||||
|
||||
private void onRequestFail(TiRegion region, long storeId) {
|
||||
public synchronized void onRequestFail(TiRegion region) {
|
||||
cache.invalidateRegion(region);
|
||||
cache.invalidateAllRegionForStore(storeId);
|
||||
}
|
||||
|
||||
public void invalidateStore(long storeId) {
|
||||
|
@ -210,135 +258,4 @@ public class RegionManager {
|
|||
public void invalidateRegion(TiRegion region) {
|
||||
cache.invalidateRegion(region);
|
||||
}
|
||||
|
||||
public static class RegionCache {
|
||||
private final Map<Long, TiRegion> regionCache;
|
||||
private final Map<Long, Store> storeCache;
|
||||
private final RangeMap<Key, Long> keyToRegionIdCache;
|
||||
private final ReadOnlyPDClient pdClient;
|
||||
|
||||
public RegionCache(ReadOnlyPDClient pdClient) {
|
||||
regionCache = new HashMap<>();
|
||||
storeCache = new HashMap<>();
|
||||
|
||||
keyToRegionIdCache = TreeRangeMap.create();
|
||||
this.pdClient = pdClient;
|
||||
}
|
||||
|
||||
public synchronized TiRegion getRegionByKey(ByteString key, BackOffer backOffer) {
|
||||
Histogram.Timer requestTimer = GET_REGION_BY_KEY_REQUEST_LATENCY.startTimer();
|
||||
try {
|
||||
Long regionId;
|
||||
if (key.isEmpty()) {
|
||||
// if key is empty, it must be the start key.
|
||||
regionId = keyToRegionIdCache.get(Key.toRawKey(key, true));
|
||||
} else {
|
||||
regionId = keyToRegionIdCache.get(Key.toRawKey(key));
|
||||
}
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(
|
||||
String.format("getRegionByKey key[%s] -> ID[%s]", formatBytesUTF8(key), regionId));
|
||||
}
|
||||
|
||||
if (regionId == null) {
|
||||
logger.debug("Key not found in keyToRegionIdCache:" + formatBytesUTF8(key));
|
||||
TiRegion region = pdClient.getRegionByKey(backOffer, key);
|
||||
if (!putRegion(region)) {
|
||||
throw new TiClientInternalException("Invalid Region: " + region.toString());
|
||||
}
|
||||
return region;
|
||||
}
|
||||
TiRegion region;
|
||||
region = regionCache.get(regionId);
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("getRegionByKey ID[%s] -> Region[%s]", regionId, region));
|
||||
}
|
||||
|
||||
return region;
|
||||
} finally {
|
||||
requestTimer.observeDuration();
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized boolean putRegion(TiRegion region) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("putRegion: " + region);
|
||||
}
|
||||
regionCache.put(region.getId(), region);
|
||||
keyToRegionIdCache.put(makeRange(region.getStartKey(), region.getEndKey()), region.getId());
|
||||
return true;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
private synchronized TiRegion getRegionById(BackOffer backOffer, long regionId) {
|
||||
TiRegion region = regionCache.get(regionId);
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("getRegionByKey ID[%s] -> Region[%s]", regionId, region));
|
||||
}
|
||||
if (region == null) {
|
||||
region = pdClient.getRegionByID(backOffer, regionId);
|
||||
if (!putRegion(region)) {
|
||||
throw new TiClientInternalException("Invalid Region: " + region.toString());
|
||||
}
|
||||
}
|
||||
return region;
|
||||
}
|
||||
|
||||
private synchronized TiRegion getRegionFromCache(long regionId) {
|
||||
return regionCache.get(regionId);
|
||||
}
|
||||
|
||||
/** Removes region associated with regionId from regionCache. */
|
||||
public synchronized void invalidateRegion(TiRegion region) {
|
||||
try {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("invalidateRegion ID[%s]", region.getId()));
|
||||
}
|
||||
TiRegion oldRegion = regionCache.get(region.getId());
|
||||
if (oldRegion != null && oldRegion == region) {
|
||||
keyToRegionIdCache.remove(makeRange(region.getStartKey(), region.getEndKey()));
|
||||
regionCache.remove(region.getId());
|
||||
}
|
||||
} catch (Exception ignore) {
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void invalidateAllRegionForStore(long storeId) {
|
||||
List<TiRegion> regionToRemove = new ArrayList<>();
|
||||
for (TiRegion r : regionCache.values()) {
|
||||
if (r.getLeader().getStoreId() == storeId) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("invalidateAllRegionForStore Region[%s]", r));
|
||||
}
|
||||
regionToRemove.add(r);
|
||||
}
|
||||
}
|
||||
|
||||
// remove region
|
||||
for (TiRegion r : regionToRemove) {
|
||||
regionCache.remove(r.getId());
|
||||
keyToRegionIdCache.remove(makeRange(r.getStartKey(), r.getEndKey()));
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void invalidateStore(long storeId) {
|
||||
storeCache.remove(storeId);
|
||||
}
|
||||
|
||||
public synchronized Store getStoreById(long id, BackOffer backOffer) {
|
||||
try {
|
||||
Store store = storeCache.get(id);
|
||||
if (store == null) {
|
||||
store = pdClient.getStore(backOffer, id);
|
||||
}
|
||||
if (store.getState().equals(StoreState.Tombstone)) {
|
||||
return null;
|
||||
}
|
||||
storeCache.put(id, store);
|
||||
return store;
|
||||
} catch (Exception e) {
|
||||
throw new GrpcException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,10 +26,11 @@ import com.google.protobuf.InvalidProtocolBufferException;
|
|||
import com.pingcap.tidb.tipb.DAGRequest;
|
||||
import com.pingcap.tidb.tipb.SelectResponse;
|
||||
import io.grpc.ManagedChannel;
|
||||
import io.grpc.Metadata;
|
||||
import io.grpc.stub.MetadataUtils;
|
||||
import io.prometheus.client.Histogram;
|
||||
import java.util.*;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.PDClient;
|
||||
|
@ -38,12 +39,13 @@ import org.tikv.common.TiConfiguration;
|
|||
import org.tikv.common.Version;
|
||||
import org.tikv.common.exception.*;
|
||||
import org.tikv.common.operation.KVErrorHandler;
|
||||
import org.tikv.common.operation.RegionErrorHandler;
|
||||
import org.tikv.common.streaming.StreamingResponse;
|
||||
import org.tikv.common.util.*;
|
||||
import org.tikv.kvproto.Coprocessor;
|
||||
import org.tikv.kvproto.Errorpb;
|
||||
import org.tikv.kvproto.Kvrpcpb.*;
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.TikvGrpc;
|
||||
import org.tikv.kvproto.TikvGrpc.TikvBlockingStub;
|
||||
import org.tikv.kvproto.TikvGrpc.TikvStub;
|
||||
|
@ -87,7 +89,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
private RegionStoreClient(
|
||||
TiConfiguration conf,
|
||||
TiRegion region,
|
||||
String storeVersion,
|
||||
TiStore store,
|
||||
TiStoreType storeType,
|
||||
ChannelFactory channelFactory,
|
||||
TikvBlockingStub blockingStub,
|
||||
|
@ -95,15 +97,15 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
RegionManager regionManager,
|
||||
PDClient pdClient,
|
||||
RegionStoreClient.RegionStoreClientBuilder clientBuilder) {
|
||||
super(conf, region, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
super(conf, region, store, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
this.storeType = storeType;
|
||||
|
||||
if (this.storeType == TiStoreType.TiKV) {
|
||||
this.lockResolverClient =
|
||||
AbstractLockResolverClient.getInstance(
|
||||
storeVersion,
|
||||
conf,
|
||||
region,
|
||||
store,
|
||||
this.blockingStub,
|
||||
this.asyncStub,
|
||||
channelFactory,
|
||||
|
@ -112,10 +114,10 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
clientBuilder);
|
||||
|
||||
} else {
|
||||
Store tikvStore =
|
||||
TiStore tikvStore =
|
||||
regionManager.getRegionStorePairByKey(region.getStartKey(), TiStoreType.TiKV).second;
|
||||
|
||||
String addressStr = tikvStore.getAddress();
|
||||
String addressStr = tikvStore.getStore().getAddress();
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("Create region store client on address %s", addressStr));
|
||||
}
|
||||
|
@ -126,9 +128,9 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
|
||||
this.lockResolverClient =
|
||||
AbstractLockResolverClient.getInstance(
|
||||
tikvStore.getVersion(),
|
||||
conf,
|
||||
region,
|
||||
tikvStore,
|
||||
tikvBlockingStub,
|
||||
tikvAsyncStub,
|
||||
channelFactory,
|
||||
|
@ -169,7 +171,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<GetRequest> factory =
|
||||
() ->
|
||||
GetRequest.newBuilder()
|
||||
.setContext(region.getContext(getResolvedLocks(version)))
|
||||
.setContext(makeContext(getResolvedLocks(version), this.storeType))
|
||||
.setKey(key)
|
||||
.setVersion(version)
|
||||
.build();
|
||||
|
@ -214,7 +216,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<BatchGetRequest> request =
|
||||
() ->
|
||||
BatchGetRequest.newBuilder()
|
||||
.setContext(region.getContext(getResolvedLocks(version)))
|
||||
.setContext(makeContext(getResolvedLocks(version), this.storeType))
|
||||
.addAllKeys(keys)
|
||||
.setVersion(version)
|
||||
.build();
|
||||
|
@ -277,7 +279,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<ScanRequest> request =
|
||||
() ->
|
||||
ScanRequest.newBuilder()
|
||||
.setContext(region.getContext(getResolvedLocks(version)))
|
||||
.setContext(makeContext(getResolvedLocks(version), this.storeType))
|
||||
.setStartKey(startKey)
|
||||
.setVersion(version)
|
||||
.setKeyOnly(keyOnly)
|
||||
|
@ -379,7 +381,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
() ->
|
||||
getIsV4()
|
||||
? PrewriteRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setStartVersion(startTs)
|
||||
.setPrimaryLock(primaryLock)
|
||||
.addAllMutations(mutations)
|
||||
|
@ -389,7 +391,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
.setTxnSize(16)
|
||||
.build()
|
||||
: PrewriteRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setStartVersion(startTs)
|
||||
.setPrimaryLock(primaryLock)
|
||||
.addAllMutations(mutations)
|
||||
|
@ -469,7 +471,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<TxnHeartBeatRequest> factory =
|
||||
() ->
|
||||
TxnHeartBeatRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setStartVersion(startTs)
|
||||
.setPrimaryLock(primaryLock)
|
||||
.setAdviseLockTtl(ttl)
|
||||
|
@ -527,7 +529,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
.setStartVersion(startTs)
|
||||
.setCommitVersion(commitTs)
|
||||
.addAllKeys(keys)
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.build();
|
||||
KVErrorHandler<CommitResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
|
@ -588,7 +590,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<Coprocessor.Request> reqToSend =
|
||||
() ->
|
||||
Coprocessor.Request.newBuilder()
|
||||
.setContext(region.getContext(getResolvedLocks(startTs)))
|
||||
.setContext(makeContext(getResolvedLocks(startTs), this.storeType))
|
||||
.setTp(REQ_TYPE_DAG.getValue())
|
||||
.setStartTs(startTs)
|
||||
.setData(req.toByteString())
|
||||
|
@ -711,7 +713,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<Coprocessor.Request> reqToSend =
|
||||
() ->
|
||||
Coprocessor.Request.newBuilder()
|
||||
.setContext(region.getContext(getResolvedLocks(startTs)))
|
||||
.setContext(makeContext(getResolvedLocks(startTs), this.storeType))
|
||||
// TODO: If no executors...?
|
||||
.setTp(REQ_TYPE_DAG.getValue())
|
||||
.setData(req.toByteString())
|
||||
|
@ -745,11 +747,11 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
* @param splitKeys is the split points for a specific region.
|
||||
* @return a split region info.
|
||||
*/
|
||||
public List<TiRegion> splitRegion(Iterable<ByteString> splitKeys) {
|
||||
public List<Metapb.Region> splitRegion(Iterable<ByteString> splitKeys) {
|
||||
Supplier<SplitRegionRequest> request =
|
||||
() ->
|
||||
SplitRegionRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.addAllSplitKeys(splitKeys)
|
||||
.build();
|
||||
|
||||
|
@ -780,18 +782,7 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
region.getId(), resp.getRegionError().toString()));
|
||||
}
|
||||
|
||||
return resp.getRegionsList()
|
||||
.stream()
|
||||
.map(
|
||||
region ->
|
||||
new TiRegion(
|
||||
region,
|
||||
null,
|
||||
conf.getIsolationLevel(),
|
||||
conf.getCommandPriority(),
|
||||
conf.getKvMode(),
|
||||
conf.isReplicaRead()))
|
||||
.collect(Collectors.toList());
|
||||
return resp.getRegionsList();
|
||||
}
|
||||
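splitRegion now hands back raw Metapb.Region objects instead of wrapping them in TiRegion. A hedged caller-side sketch; client and splitKeys are illustrative names assumed to be in scope.

  // Illustrative consumption of the new return type.
  List<Metapb.Region> newRegions = client.splitRegion(splitKeys);
  for (Metapb.Region r : newRegions) {
    System.out.printf(
        "split produced region %d [%s, %s)%n",
        r.getId(), r.getStartKey().toStringUtf8(), r.getEndKey().toStringUtf8());
  }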
|
||||
// APIs for Raw Scan/Put/Get/Delete
|
||||
|
@ -801,9 +792,9 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
GRPC_RAW_REQUEST_LATENCY.labels("client_grpc_raw_get").startTimer();
|
||||
try {
|
||||
Supplier<RawGetRequest> factory =
|
||||
() -> RawGetRequest.newBuilder().setContext(region.getContext()).setKey(key).build();
|
||||
KVErrorHandler<RawGetResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
() -> RawGetRequest.newBuilder().setContext(makeContext(storeType)).setKey(key).build();
|
||||
RegionErrorHandler<RawGetResponse> handler =
|
||||
new RegionErrorHandler<RawGetResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawGetResponse resp = callWithRetry(backOffer, TikvGrpc.getRawGetMethod(), factory, handler);
|
||||
return rawGetHelper(resp);
|
||||
|
@ -833,9 +824,12 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
try {
|
||||
Supplier<RawGetKeyTTLRequest> factory =
|
||||
() ->
|
||||
RawGetKeyTTLRequest.newBuilder().setContext(region.getContext()).setKey(key).build();
|
||||
KVErrorHandler<RawGetKeyTTLResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RawGetKeyTTLRequest.newBuilder()
|
||||
.setContext(makeContext(storeType))
|
||||
.setKey(key)
|
||||
.build();
|
||||
RegionErrorHandler<RawGetKeyTTLResponse> handler =
|
||||
new RegionErrorHandler<RawGetKeyTTLResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawGetKeyTTLResponse resp =
|
||||
callWithRetry(backOffer, TikvGrpc.getRawGetKeyTTLMethod(), factory, handler);
|
||||
|
@ -863,15 +857,20 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
return resp.getTtl();
|
||||
}
|
||||
|
||||
public void rawDelete(BackOffer backOffer, ByteString key) {
|
||||
public void rawDelete(BackOffer backOffer, ByteString key, boolean atomic) {
|
||||
Histogram.Timer requestTimer =
|
||||
GRPC_RAW_REQUEST_LATENCY.labels("client_grpc_raw_delete").startTimer();
|
||||
try {
|
||||
Supplier<RawDeleteRequest> factory =
|
||||
() -> RawDeleteRequest.newBuilder().setContext(region.getContext()).setKey(key).build();
|
||||
() ->
|
||||
RawDeleteRequest.newBuilder()
|
||||
.setContext(makeContext(storeType))
|
||||
.setKey(key)
|
||||
.setForCas(atomic)
|
||||
.build();
|
||||
|
||||
KVErrorHandler<RawDeleteResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawDeleteResponse> handler =
|
||||
new RegionErrorHandler<RawDeleteResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawDeleteResponse resp =
|
||||
callWithRetry(backOffer, TikvGrpc.getRawDeleteMethod(), factory, handler);
|
||||
|
@ -895,21 +894,23 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
}
|
||||
}
|
||||
|
||||
public void rawPut(BackOffer backOffer, ByteString key, ByteString value, long ttl) {
|
||||
public void rawPut(
|
||||
BackOffer backOffer, ByteString key, ByteString value, long ttl, boolean atomic) {
|
||||
Histogram.Timer requestTimer =
|
||||
GRPC_RAW_REQUEST_LATENCY.labels("client_grpc_raw_put").startTimer();
|
||||
try {
|
||||
Supplier<RawPutRequest> factory =
|
||||
() ->
|
||||
RawPutRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setKey(key)
|
||||
.setValue(value)
|
||||
.setTtl(ttl)
|
||||
.setForCas(atomic)
|
||||
.build();
|
||||
|
||||
KVErrorHandler<RawPutResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawPutResponse> handler =
|
||||
new RegionErrorHandler<RawPutResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawPutResponse resp = callWithRetry(backOffer, TikvGrpc.getRawPutMethod(), factory, handler);
|
||||
rawPutHelper(resp);
|
||||
|
@ -940,15 +941,15 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<RawCASRequest> factory =
|
||||
() ->
|
||||
RawCASRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setKey(key)
|
||||
.setValue(value)
|
||||
.setPreviousNotExist(true)
|
||||
.setTtl(ttl)
|
||||
.build();
|
||||
|
||||
KVErrorHandler<RawCASResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawCASResponse> handler =
|
||||
new RegionErrorHandler<RawCASResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawCASResponse resp =
|
||||
callWithRetry(backOffer, TikvGrpc.getRawCompareAndSwapMethod(), factory, handler);
|
||||
|
@ -986,11 +987,11 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<RawBatchGetRequest> factory =
|
||||
() ->
|
||||
RawBatchGetRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.addAllKeys(keys)
|
||||
.build();
|
||||
KVErrorHandler<RawBatchGetResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawBatchGetResponse> handler =
|
||||
new RegionErrorHandler<RawBatchGetResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawBatchGetResponse resp =
|
||||
callWithRetry(backoffer, TikvGrpc.getRawBatchGetMethod(), factory, handler);
|
||||
|
@ -1021,13 +1022,13 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<RawBatchPutRequest> factory =
|
||||
() ->
|
||||
RawBatchPutRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.addAllPairs(kvPairs)
|
||||
.setTtl(ttl)
|
||||
.setForCas(atomic)
|
||||
.build();
|
||||
KVErrorHandler<RawBatchPutResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawBatchPutResponse> handler =
|
||||
new RegionErrorHandler<RawBatchPutResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawBatchPutResponse resp =
|
||||
callWithRetry(backOffer, TikvGrpc.getRawBatchPutMethod(), factory, handler);
|
||||
|
@ -1073,12 +1074,12 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<RawBatchDeleteRequest> factory =
|
||||
() ->
|
||||
RawBatchDeleteRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.addAllKeys(keys)
|
||||
.setForCas(atomic)
|
||||
.build();
|
||||
KVErrorHandler<RawBatchDeleteResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawBatchDeleteResponse> handler =
|
||||
new RegionErrorHandler<RawBatchDeleteResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawBatchDeleteResponse resp =
|
||||
callWithRetry(backoffer, TikvGrpc.getRawBatchDeleteMethod(), factory, handler);
|
||||
|
@ -1118,14 +1119,14 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<RawScanRequest> factory =
|
||||
() ->
|
||||
RawScanRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setStartKey(key)
|
||||
.setKeyOnly(keyOnly)
|
||||
.setLimit(limit)
|
||||
.build();
|
||||
|
||||
KVErrorHandler<RawScanResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawScanResponse> handler =
|
||||
new RegionErrorHandler<RawScanResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawScanResponse resp =
|
||||
callWithRetry(backOffer, TikvGrpc.getRawScanMethod(), factory, handler);
|
||||
|
@ -1164,13 +1165,13 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
Supplier<RawDeleteRangeRequest> factory =
|
||||
() ->
|
||||
RawDeleteRangeRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(makeContext(storeType))
|
||||
.setStartKey(startKey)
|
||||
.setEndKey(endKey)
|
||||
.build();
|
||||
|
||||
KVErrorHandler<RawDeleteRangeResponse> handler =
|
||||
new KVErrorHandler<>(
|
||||
RegionErrorHandler<RawDeleteRangeResponse> handler =
|
||||
new RegionErrorHandler<RawDeleteRangeResponse>(
|
||||
regionManager, this, resp -> resp.hasRegionError() ? resp.getRegionError() : null);
|
||||
RawDeleteRangeResponse resp =
|
||||
callWithRetry(backOffer, TikvGrpc.getRawDeleteRangeMethod(), factory, handler);
|
||||
|
@ -1232,25 +1233,39 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
this.pdClient = pdClient;
|
||||
}
|
||||
|
||||
public RegionStoreClient build(TiRegion region, Store store, TiStoreType storeType)
|
||||
public RegionStoreClient build(TiRegion region, TiStore store, TiStoreType storeType)
|
||||
throws GrpcException {
|
||||
Objects.requireNonNull(region, "region is null");
|
||||
Objects.requireNonNull(store, "store is null");
|
||||
Objects.requireNonNull(storeType, "storeType is null");
|
||||
|
||||
String addressStr = store.getAddress();
|
||||
String addressStr = store.getStore().getAddress();
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("Create region store client on address %s", addressStr));
|
||||
}
|
||||
ManagedChannel channel = channelFactory.getChannel(addressStr, pdClient.getHostMapping());
|
||||
ManagedChannel channel = null;
|
||||
|
||||
TikvBlockingStub blockingStub = TikvGrpc.newBlockingStub(channel);
|
||||
TikvStub asyncStub = TikvGrpc.newStub(channel);
|
||||
TikvBlockingStub blockingStub = null;
|
||||
TikvStub asyncStub = null;
|
||||
|
||||
if (conf.getEnableGrpcForward() && store.getProxyStore() != null && !store.isReachable()) {
|
||||
addressStr = store.getProxyStore().getAddress();
|
||||
channel =
|
||||
channelFactory.getChannel(addressStr, regionManager.getPDClient().getHostMapping());
|
||||
Metadata header = new Metadata();
|
||||
header.put(TiConfiguration.FORWARD_META_DATA_KEY, store.getStore().getAddress());
|
||||
blockingStub = MetadataUtils.attachHeaders(TikvGrpc.newBlockingStub(channel), header);
|
||||
asyncStub = MetadataUtils.attachHeaders(TikvGrpc.newStub(channel), header);
|
||||
} else {
|
||||
channel = channelFactory.getChannel(addressStr, pdClient.getHostMapping());
|
||||
blockingStub = TikvGrpc.newBlockingStub(channel);
|
||||
asyncStub = TikvGrpc.newStub(channel);
|
||||
}
|
||||
|
||||
return new RegionStoreClient(
|
||||
conf,
|
||||
region,
|
||||
store.getVersion(),
|
||||
store,
|
||||
storeType,
|
||||
channelFactory,
|
||||
blockingStub,
|
||||
|
@ -1260,7 +1275,8 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
this);
|
||||
}
|
||||
|
||||
public synchronized RegionStoreClient build(TiRegion region, Store store) throws GrpcException {
|
||||
public synchronized RegionStoreClient build(TiRegion region, TiStore store)
|
||||
throws GrpcException {
|
||||
return build(region, store, TiStoreType.TiKV);
|
||||
}
|
||||
|
||||
|
@ -1270,12 +1286,12 @@ public class RegionStoreClient extends AbstractRegionStoreClient {
|
|||
|
||||
public synchronized RegionStoreClient build(ByteString key, TiStoreType storeType)
|
||||
throws GrpcException {
|
||||
Pair<TiRegion, Store> pair = regionManager.getRegionStorePairByKey(key, storeType);
|
||||
Pair<TiRegion, TiStore> pair = regionManager.getRegionStorePairByKey(key, storeType);
|
||||
return build(pair.first, pair.second, storeType);
|
||||
}
|
||||
|
||||
public synchronized RegionStoreClient build(TiRegion region) throws GrpcException {
|
||||
Store store = regionManager.getStoreById(region.getLeader().getStoreId());
|
||||
TiStore store = regionManager.getStoreById(region.getLeader().getStoreId());
|
||||
return build(region, store, TiStoreType.TiKV);
|
||||
}
|
||||
|
||||
|
|
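The builder above dials the proxy store and attaches a forwarding header whenever the target store is unreachable but a proxy is configured. Below is a self-contained sketch of that gRPC pattern with a placeholder header key (the real client takes it from TiConfiguration.FORWARD_META_DATA_KEY) and placeholder addresses; it is not a copy of the library code.

import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.Metadata;
import io.grpc.stub.MetadataUtils;
import org.tikv.kvproto.TikvGrpc;

public class ForwardStubSketch {
  // Hypothetical header key; the real key is defined in TiConfiguration.
  static final Metadata.Key<String> FORWARD_KEY =
      Metadata.Key.of("tikv-forwarded-host", Metadata.ASCII_STRING_MARSHALLER);

  public static TikvGrpc.TikvBlockingStub forwardedStub(String proxyAddr, String targetAddr) {
    ManagedChannel proxyChannel =
        ManagedChannelBuilder.forTarget(proxyAddr).usePlaintext().build();
    Metadata header = new Metadata();
    header.put(FORWARD_KEY, targetAddr); // the proxy relays the request to the real store
    return MetadataUtils.attachHeaders(TikvGrpc.newBlockingStub(proxyChannel), header);
  }
}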
|
@ -0,0 +1,151 @@
|
|||
package org.tikv.common.region;
|
||||
|
||||
import io.grpc.ManagedChannel;
|
||||
import io.grpc.health.v1.HealthCheckRequest;
|
||||
import io.grpc.health.v1.HealthCheckResponse;
|
||||
import io.grpc.health.v1.HealthGrpc;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.ReadOnlyPDClient;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class StoreHealthyChecker implements Runnable {
|
||||
private static final Logger logger = LoggerFactory.getLogger(StoreHealthyChecker.class);
|
||||
private static final long MAX_CHECK_STORE_TOMBSTONE_TICK = 60;
|
||||
private BlockingQueue<TiStore> taskQueue;
|
||||
private final ChannelFactory channelFactory;
|
||||
private final ReadOnlyPDClient pdClient;
|
||||
private final RegionCache cache;
|
||||
private long checkTombstoneTick;
|
||||
private long timeout;
|
||||
|
||||
public StoreHealthyChecker(
|
||||
ChannelFactory channelFactory, ReadOnlyPDClient pdClient, RegionCache cache, long timeout) {
|
||||
this.taskQueue = new LinkedBlockingQueue<>();
|
||||
this.channelFactory = channelFactory;
|
||||
this.pdClient = pdClient;
|
||||
this.cache = cache;
|
||||
this.checkTombstoneTick = 0;
|
||||
this.timeout = timeout;
|
||||
}
|
||||
|
||||
public boolean scheduleStoreHealthCheck(TiStore store) {
|
||||
if (!this.taskQueue.add(store)) {
|
||||
// if the store cannot be queued, return false so the caller can mark it reachable and retry scheduling later.
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private List<TiStore> getValidStores() {
|
||||
List<TiStore> unhealthStore = new LinkedList<>();
|
||||
while (!this.taskQueue.isEmpty()) {
|
||||
try {
|
||||
TiStore store = this.taskQueue.take();
|
||||
if (!store.isValid()) {
|
||||
continue;
|
||||
}
|
||||
unhealthStore.add(store);
|
||||
} catch (Exception e) {
|
||||
return unhealthStore;
|
||||
}
|
||||
}
|
||||
return unhealthStore;
|
||||
}
|
||||
|
||||
private boolean checkStoreHealth(TiStore store) {
|
||||
String addressStr = store.getStore().getAddress();
|
||||
try {
|
||||
ManagedChannel channel = channelFactory.getChannel(addressStr, pdClient.getHostMapping());
|
||||
HealthGrpc.HealthBlockingStub stub =
|
||||
HealthGrpc.newBlockingStub(channel).withDeadlineAfter(timeout, TimeUnit.MILLISECONDS);
|
||||
HealthCheckRequest req = HealthCheckRequest.newBuilder().build();
|
||||
HealthCheckResponse resp = stub.check(req);
|
||||
if (resp.getStatus() == HealthCheckResponse.ServingStatus.SERVING) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkStoreTombstone(TiStore store) {
|
||||
try {
|
||||
Metapb.Store newStore = pdClient.getStore(ConcreteBackOffer.newRawKVBackOff(), store.getId());
|
||||
if (newStore.getState() == Metapb.StoreState.Tombstone) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
checkTombstoneTick += 1;
|
||||
boolean needCheckTombstoneStore = false;
|
||||
if (checkTombstoneTick >= MAX_CHECK_STORE_TOMBSTONE_TICK) {
|
||||
needCheckTombstoneStore = true;
|
||||
checkTombstoneTick = 0;
|
||||
}
|
||||
List<TiStore> allStores = getValidStores();
|
||||
List<TiStore> unreachableStore = new LinkedList<>();
|
||||
for (TiStore store : allStores) {
|
||||
if (needCheckTombstoneStore) {
|
||||
if (checkStoreTombstone(store)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (checkStoreHealth(store)) {
|
||||
if (store.getProxyStore() != null) {
|
||||
TiStore newStore = store.withProxy(null);
|
||||
logger.warn(String.format("store [%s] is reachable again", store.getAddress()));
|
||||
if (cache.putStore(newStore.getId(), newStore)) {
|
||||
this.taskQueue.add(newStore);
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
if (!store.isReachable()) {
|
||||
logger.warn(
    String.format(
        "store [%s] is reachable again and can forward requests", store.getAddress()));
|
||||
store.markReachable();
|
||||
}
|
||||
if (!store.canForwardFirst()) {
|
||||
store.markCanForward();
|
||||
}
|
||||
}
|
||||
} else if (store.isReachable()) {
|
||||
unreachableStore.add(store);
|
||||
continue;
|
||||
}
|
||||
this.taskQueue.add(store);
|
||||
}
|
||||
if (!unreachableStore.isEmpty()) {
|
||||
try {
|
||||
Thread.sleep(timeout);
|
||||
} catch (Exception e) {
|
||||
this.taskQueue.addAll(unreachableStore);
|
||||
return;
|
||||
}
|
||||
for (TiStore store : unreachableStore) {
|
||||
if (!checkStoreHealth(store)) {
|
||||
logger.warn(String.format("store [%s] is not reachable", store.getAddress()));
|
||||
store.markUnreachable();
|
||||
}
|
||||
this.taskQueue.add(store);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
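StoreHealthyChecker is a plain Runnable that performs one health-check pass per invocation. A hedged sketch of how it could be wired up; channelFactory, pdClient, regionCache and store are assumed to be in scope, and the one-second period and 200 ms gRPC timeout are assumptions, not values taken from this change.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative wiring only.
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
StoreHealthyChecker checker =
    new StoreHealthyChecker(channelFactory, pdClient, regionCache, 200 /* grpc timeout ms */);
scheduler.scheduleAtFixedRate(checker, 1, 1, TimeUnit.SECONDS);

// When a request against a store fails, hand the store to the checker:
checker.scheduleStoreHealthCheck(store);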
@ -22,14 +22,15 @@ import java.io.Serializable;
|
|||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import org.tikv.common.TiConfiguration.KVMode;
|
||||
import org.tikv.common.codec.Codec.BytesCodec;
|
||||
import org.tikv.common.codec.CodecDataInput;
|
||||
import java.util.stream.Collectors;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.codec.KeyUtils;
|
||||
import org.tikv.common.exception.TiClientInternalException;
|
||||
import org.tikv.common.key.Key;
|
||||
import org.tikv.common.replica.ReplicaSelector;
|
||||
import org.tikv.common.util.FastByteComparisons;
|
||||
import org.tikv.common.util.KeyRangeUtils;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
|
@ -39,44 +40,28 @@ import org.tikv.kvproto.Metapb.Peer;
|
|||
import org.tikv.kvproto.Metapb.Region;
|
||||
|
||||
public class TiRegion implements Serializable {
|
||||
private static final Logger logger = LoggerFactory.getLogger(TiRegion.class);
|
||||
|
||||
private final Region meta;
|
||||
private final IsolationLevel isolationLevel;
|
||||
private final Kvrpcpb.CommandPri commandPri;
|
||||
private final TiConfiguration conf;
|
||||
private final Peer leader;
|
||||
private int followerIdx = 0;
|
||||
private final boolean isReplicaRead;
|
||||
private final ReplicaSelector replicaSelector;
|
||||
private final List<Peer> replicaList;
|
||||
private int replicaIdx;
|
||||
private final List<Peer> peers;
|
||||
private final List<TiStore> stores;
|
||||
|
||||
public TiRegion(
|
||||
Region meta,
|
||||
Peer leader,
|
||||
IsolationLevel isolationLevel,
|
||||
Kvrpcpb.CommandPri commandPri,
|
||||
KVMode kvMode) {
|
||||
this(meta, leader, isolationLevel, commandPri, kvMode, false);
|
||||
}
|
||||
|
||||
private TiRegion(
|
||||
Region meta,
|
||||
Peer leader,
|
||||
IsolationLevel isolationLevel,
|
||||
Kvrpcpb.CommandPri commandPri,
|
||||
boolean isReplicaRead) {
|
||||
this.meta = meta;
|
||||
this.leader = leader;
|
||||
this.isolationLevel = isolationLevel;
|
||||
this.commandPri = commandPri;
|
||||
this.isReplicaRead = isReplicaRead;
|
||||
}
|
||||
|
||||
public TiRegion(
|
||||
Region meta,
|
||||
Peer leader,
|
||||
IsolationLevel isolationLevel,
|
||||
Kvrpcpb.CommandPri commandPri,
|
||||
KVMode kvMode,
|
||||
boolean isReplicaRead) {
|
||||
Objects.requireNonNull(meta, "meta is null");
|
||||
this.meta = decodeRegion(meta, kvMode == KVMode.RAW);
|
||||
TiConfiguration conf, Region meta, Peer leader, List<Peer> peers, List<TiStore> stores) {
|
||||
this.conf = Objects.requireNonNull(conf, "conf is null");
|
||||
this.meta = Objects.requireNonNull(meta, "meta is null");
|
||||
this.isolationLevel = conf.getIsolationLevel();
|
||||
this.commandPri = conf.getCommandPriority();
|
||||
this.peers = peers;
|
||||
this.stores = stores;
|
||||
this.replicaSelector = conf.getReplicaSelector();
|
||||
if (leader == null || leader.getId() == 0) {
|
||||
if (meta.getPeersCount() == 0) {
|
||||
throw new TiClientInternalException("Empty peer list for region " + meta.getId());
|
||||
|
@ -86,77 +71,56 @@ public class TiRegion implements Serializable {
|
|||
} else {
|
||||
this.leader = leader;
|
||||
}
|
||||
if (isReplicaRead && meta.getPeersCount() > 0) {
|
||||
// try to get first follower
|
||||
try {
|
||||
chooseRandomFollower();
|
||||
} catch (Exception ignore) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
this.isolationLevel = isolationLevel;
|
||||
this.commandPri = commandPri;
|
||||
this.isReplicaRead = isReplicaRead;
|
||||
}
|
||||
|
||||
private Region decodeRegion(Region region, boolean isRawRegion) {
|
||||
Region.Builder builder =
|
||||
Region.newBuilder()
|
||||
.setId(region.getId())
|
||||
.setRegionEpoch(region.getRegionEpoch())
|
||||
.addAllPeers(region.getPeersList());
|
||||
|
||||
if (region.getStartKey().isEmpty() || isRawRegion) {
|
||||
builder.setStartKey(region.getStartKey());
|
||||
} else {
|
||||
byte[] decodedStartKey = BytesCodec.readBytes(new CodecDataInput(region.getStartKey()));
|
||||
builder.setStartKey(ByteString.copyFrom(decodedStartKey));
|
||||
}
|
||||
|
||||
if (region.getEndKey().isEmpty() || isRawRegion) {
|
||||
builder.setEndKey(region.getEndKey());
|
||||
} else {
|
||||
byte[] decodedEndKey = BytesCodec.readBytes(new CodecDataInput(region.getEndKey()));
|
||||
builder.setEndKey(ByteString.copyFrom(decodedEndKey));
|
||||
}
|
||||
|
||||
return builder.build();
|
||||
// init replicaList
|
||||
replicaList =
|
||||
replicaSelector
|
||||
.select(new org.tikv.common.replica.Region(meta, this.leader, peers, stores))
|
||||
.stream()
|
||||
.map(org.tikv.common.replica.Store::getPeer)
|
||||
.collect(Collectors.toList());
|
||||
replicaIdx = 0;
|
||||
}
|
||||
|
||||
public Peer getLeader() {
|
||||
return leader;
|
||||
}
|
||||
|
||||
public Peer getCurrentFollower() {
|
||||
return meta.getPeers(followerIdx);
|
||||
}
|
||||
|
||||
private boolean isValidFollower(Peer peer) {
|
||||
return Metapb.PeerRole.valueOf(peer.getRole().getValueDescriptor()) == Metapb.PeerRole.Voter;
|
||||
}
|
||||
|
||||
private void chooseRandomFollower() {
|
||||
int cnt = meta.getPeersCount();
|
||||
followerIdx = new Random().nextInt(cnt);
|
||||
for (int retry = cnt - 1; retry > 0; retry--) {
|
||||
followerIdx = (followerIdx + 1) % cnt;
|
||||
Peer cur = meta.getPeers(followerIdx);
|
||||
if (isValidFollower(cur)) {
|
||||
return;
|
||||
public List<Peer> getFollowerList() {
|
||||
List<Peer> peers = new ArrayList<>();
|
||||
for (Peer peer : getMeta().getPeersList()) {
|
||||
if (!peer.equals(this.leader)) {
|
||||
if (peer.getRole().equals(Metapb.PeerRole.Voter)) {
|
||||
peers.add(peer);
|
||||
}
|
||||
}
|
||||
}
|
||||
return peers;
|
||||
}
|
||||
|
||||
public List<Peer> getLearnerList() {
|
||||
List<Peer> peers = new ArrayList<>();
|
||||
for (Peer peer : getMeta().getPeersList()) {
|
||||
if (isValidFollower(peer)) {
|
||||
if (peer.getRole().equals(Metapb.PeerRole.Learner)) {
|
||||
peers.add(peer);
|
||||
}
|
||||
}
|
||||
return peers;
|
||||
}
|
||||
|
||||
public Peer getCurrentReplica() {
|
||||
return replicaList.get(replicaIdx);
|
||||
}
|
||||
|
||||
public Peer getNextReplica() {
|
||||
replicaIdx = (replicaIdx + 1) % replicaList.size();
|
||||
return getCurrentReplica();
|
||||
}
|
||||
|
||||
private boolean isLeader(Peer peer) {
|
||||
return getLeader().equals(peer);
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return this.meta.getId();
|
||||
}
|
||||
|
@ -177,26 +141,31 @@ public class TiRegion implements Serializable {
|
|||
return Key.toRawKey(getEndKey());
|
||||
}
|
||||
|
||||
public Kvrpcpb.Context getContext() {
|
||||
return getContext(java.util.Collections.emptySet());
|
||||
public Kvrpcpb.Context getLeaderContext() {
|
||||
return getContext(this.leader, java.util.Collections.emptySet(), false);
|
||||
}
|
||||
|
||||
public Kvrpcpb.Context getContext(Set<Long> resolvedLocks) {
|
||||
public Kvrpcpb.Context getReplicaContext(Set<Long> resolvedLocks, TiStoreType storeType) {
|
||||
Peer currentPeer = getCurrentReplica();
|
||||
boolean replicaRead = !isLeader(currentPeer) && TiStoreType.TiKV.equals(storeType);
|
||||
return getContext(currentPeer, resolvedLocks, replicaRead);
|
||||
}
|
||||
|
||||
public Kvrpcpb.Context getReplicaContext(Peer currentPeer, Set<Long> resolvedLocks) {
|
||||
return getContext(currentPeer, resolvedLocks, false);
|
||||
}
|
||||
|
||||
private Kvrpcpb.Context getContext(
|
||||
Peer currentPeer, Set<Long> resolvedLocks, boolean replicaRead) {
|
||||
|
||||
Kvrpcpb.Context.Builder builder = Kvrpcpb.Context.newBuilder();
|
||||
builder.setIsolationLevel(this.isolationLevel);
|
||||
builder.setPriority(this.commandPri);
|
||||
if (isReplicaRead) {
|
||||
builder
|
||||
.setRegionId(meta.getId())
|
||||
.setPeer(getCurrentFollower())
|
||||
.setReplicaRead(true)
|
||||
.setRegionEpoch(this.meta.getRegionEpoch());
|
||||
} else {
|
||||
builder
|
||||
.setRegionId(meta.getId())
|
||||
.setPeer(this.leader)
|
||||
.setRegionEpoch(this.meta.getRegionEpoch());
|
||||
}
|
||||
builder
|
||||
.setIsolationLevel(this.isolationLevel)
|
||||
.setPriority(this.commandPri)
|
||||
.setRegionId(meta.getId())
|
||||
.setPeer(currentPeer)
|
||||
.setReplicaRead(replicaRead)
|
||||
.setRegionEpoch(this.meta.getRegionEpoch());
|
||||
builder.addAllResolvedLocks(resolvedLocks);
|
||||
return builder.build();
|
||||
}
|
||||
|
@ -218,7 +187,7 @@ public class TiRegion implements Serializable {
|
|||
List<Peer> peers = meta.getPeersList();
|
||||
for (Peer p : peers) {
|
||||
if (p.getStoreId() == leaderStoreID) {
|
||||
return new TiRegion(this.meta, p, this.isolationLevel, this.commandPri, this.isReplicaRead);
|
||||
return new TiRegion(this.conf, this.meta, p, peers, this.stores);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
|
|
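TiRegion now keeps a selector-ordered replica list and exposes getCurrentReplica/getNextReplica plus getReplicaContext. A hedged sketch of a retry loop that rotates replicas; sendRequest is a placeholder for the real gRPC call, and the error handling is intentionally minimal.

  // Illustrative retry: rotate to the next replica chosen by the ReplicaSelector on failure.
  Kvrpcpb.Context ctx = region.getReplicaContext(resolvedLocks, TiStoreType.TiKV);
  try {
    sendRequest(ctx); // placeholder for the real gRPC call
  } catch (Exception e) {
    Metapb.Peer next = region.getNextReplica(); // advance to the next candidate peer
    ctx = region.getReplicaContext(next, resolvedLocks);
    sendRequest(ctx);
  }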
|
@ -0,0 +1,116 @@
|
|||
package org.tikv.common.region;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class TiStore {
|
||||
private static final long MAX_FAIL_FORWARD_TIMES = 4;
|
||||
private final Metapb.Store store;
|
||||
private final Metapb.Store proxyStore;
|
||||
private AtomicBoolean reachable;
|
||||
private AtomicBoolean valid;
|
||||
private AtomicLong failForwardCount;
|
||||
private AtomicBoolean canForward;
|
||||
|
||||
public TiStore(Metapb.Store store) {
|
||||
this.store = store;
|
||||
this.reachable = new AtomicBoolean(true);
|
||||
this.valid = new AtomicBoolean(true);
|
||||
this.canForward = new AtomicBoolean(true);
|
||||
this.proxyStore = null;
|
||||
this.failForwardCount = new AtomicLong(0);
|
||||
}
|
||||
|
||||
private TiStore(Metapb.Store store, Metapb.Store proxyStore) {
|
||||
this.store = store;
|
||||
if (proxyStore != null) {
|
||||
this.reachable = new AtomicBoolean(false);
|
||||
} else {
|
||||
this.reachable = new AtomicBoolean(true);
|
||||
}
|
||||
this.valid = new AtomicBoolean(true);
|
||||
this.canForward = new AtomicBoolean(true);
|
||||
this.proxyStore = proxyStore;
|
||||
this.failForwardCount = new AtomicLong(0);
|
||||
}
|
||||
|
||||
@java.lang.Override
|
||||
public boolean equals(final java.lang.Object obj) {
|
||||
if (obj == this) {
|
||||
return true;
|
||||
}
|
||||
if (!(obj instanceof TiStore)) {
|
||||
return super.equals(obj);
|
||||
}
|
||||
TiStore other = (TiStore) obj;
|
||||
if (!this.store.equals(other.store)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (proxyStore == null && other.proxyStore == null) {
|
||||
return true;
|
||||
}
|
||||
if (proxyStore != null && other.proxyStore != null) {
|
||||
return proxyStore.equals(other.proxyStore);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public TiStore withProxy(Metapb.Store proxyStore) {
|
||||
return new TiStore(this.store, proxyStore);
|
||||
}
|
||||
|
||||
public void markUnreachable() {
|
||||
this.reachable.set(false);
|
||||
}
|
||||
|
||||
public void markReachable() {
|
||||
this.reachable.set(true);
|
||||
}
|
||||
|
||||
public boolean isReachable() {
|
||||
return this.reachable.get();
|
||||
}
|
||||
|
||||
public boolean isValid() {
|
||||
return this.valid.get();
|
||||
}
|
||||
|
||||
public void markInvalid() {
|
||||
this.valid.set(false);
|
||||
}
|
||||
|
||||
public void forwardFail() {
|
||||
if (this.canForward.get()) {
|
||||
if (this.failForwardCount.addAndGet(1) >= MAX_FAIL_FORWARD_TIMES) {
|
||||
this.canForward.set(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void markCanForward() {
|
||||
this.failForwardCount.set(0);
|
||||
this.canForward.set(true);
|
||||
}
|
||||
|
||||
public boolean canForwardFirst() {
|
||||
return this.canForward.get();
|
||||
}
|
||||
|
||||
public Metapb.Store getStore() {
|
||||
return this.store;
|
||||
}
|
||||
|
||||
public String getAddress() {
|
||||
return this.store.getAddress();
|
||||
}
|
||||
|
||||
public Metapb.Store getProxyStore() {
|
||||
return this.proxyStore;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return this.store.getId();
|
||||
}
|
||||
}
|
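TiStore wraps Metapb.Store together with reachability and proxy state. A hedged sketch of the proxy failover round-trip; metaStore and proxyMetaStore are placeholder Metapb.Store values.

  // Illustrative state transition: switch a store to proxy mode, then recover it.
  TiStore store = new TiStore(metaStore);            // reachable by default
  TiStore proxied = store.withProxy(proxyMetaStore); // a proxied copy starts unreachable
  assert !proxied.isReachable();

  // A health checker that later sees the store serving again drops the proxy:
  TiStore recovered = proxied.withProxy(null);
  recovered.markReachable();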
|
@ -0,0 +1,36 @@
/*
 * Copyright 2021 PingCAP, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.tikv.common.replica;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class FollowerReplicaSelector implements ReplicaSelector {
  @Override
  public List<Store> select(Region region) {
    Store[] stores = region.getStores();
    Store leader = region.getLeader();
    List<Store> list = new ArrayList<>(stores.length);
    for (Store store : stores) {
      if (!store.isLearner() && !leader.equals(store)) {
        list.add(store);
      }
    }
    Collections.shuffle(list);
    return list;
  }
}
|
|
@ -0,0 +1,37 @@
/*
 * Copyright 2021 PingCAP, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.tikv.common.replica;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class LeaderFollowerReplicaSelector implements ReplicaSelector {
  @Override
  public List<Store> select(Region region) {
    Store[] stores = region.getStores();
    Store leader = region.getLeader();
    List<Store> list = new ArrayList<>(stores.length);
    for (Store store : stores) {
      if (!store.isLearner() && !leader.equals(store)) {
        list.add(store);
      }
    }
    Collections.shuffle(list);
    list.add(leader);
    return list;
  }
}
|
|
@ -0,0 +1,28 @@
/*
 * Copyright 2021 PingCAP, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.tikv.common.replica;

import java.util.ArrayList;
import java.util.List;

public class LeaderReplicaSelector implements ReplicaSelector {
  @Override
  public List<Store> select(Region region) {
    List<Store> list = new ArrayList<>(1);
    list.add(region.getLeader());
    return list;
  }
}
|
|
@ -0,0 +1,57 @@
|
|||
package org.tikv.common.replica;
|
||||
|
||||
import static com.google.common.base.MoreObjects.toStringHelper;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class Region {
|
||||
private final Metapb.Region region;
|
||||
private final Store[] stores;
|
||||
private Store leaderStore;
|
||||
|
||||
public Region(
|
||||
final Metapb.Region region,
|
||||
final Metapb.Peer leader,
|
||||
final List<Metapb.Peer> peers,
|
||||
final List<TiStore> stores) {
|
||||
this.region = region;
|
||||
this.stores = new Store[stores.size()];
|
||||
Iterator<Metapb.Peer> peer = peers.iterator();
|
||||
Iterator<TiStore> store = stores.iterator();
|
||||
for (int idx = 0; idx < peers.size(); idx++) {
|
||||
Metapb.Peer currentPeer = peer.next();
|
||||
boolean isLeader = currentPeer.equals(leader);
|
||||
this.stores[idx] = new Store(currentPeer, store.next().getStore(), isLeader);
|
||||
if (isLeader) {
|
||||
leaderStore = this.stores[idx];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public Store[] getStores() {
|
||||
return stores;
|
||||
}
|
||||
|
||||
public Store getLeader() {
|
||||
return leaderStore;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return region.getId();
|
||||
}
|
||||
|
||||
public byte[] getStartKey() {
|
||||
return region.getStartKey().toByteArray();
|
||||
}
|
||||
|
||||
public byte[] getEndKey() {
|
||||
return region.getEndKey().toByteArray();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return toStringHelper(this).add("region", region).add("stores", stores).toString();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
/*
 * Copyright 2021 PingCAP, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.tikv.common.replica;

import java.io.Serializable;
import java.util.List;

public interface ReplicaSelector extends Serializable {
  ReplicaSelector LEADER = new LeaderReplicaSelector();
  ReplicaSelector FOLLOWER = new FollowerReplicaSelector();
  ReplicaSelector LEADER_AND_FOLLOWER = new LeaderFollowerReplicaSelector();

  List<Store> select(Region region);
}
|
|
@ -0,0 +1,111 @@
|
|||
package org.tikv.common.replica;
|
||||
|
||||
import static com.google.common.base.MoreObjects.toStringHelper;
|
||||
|
||||
import java.util.List;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class Store {
|
||||
public static class Label {
|
||||
private final org.tikv.kvproto.Metapb.StoreLabel label;
|
||||
|
||||
Label(org.tikv.kvproto.Metapb.StoreLabel label) {
|
||||
this.label = label;
|
||||
}
|
||||
|
||||
public String getKey() {
|
||||
return label.getKey();
|
||||
}
|
||||
|
||||
public String getValue() {
|
||||
return label.getValue();
|
||||
}
|
||||
}
|
||||
|
||||
public enum State {
|
||||
Unknown,
|
||||
Up,
|
||||
Offline,
|
||||
Tombstone
|
||||
}
|
||||
|
||||
private static final Label[] EMPTY_LABELS = new Label[0];
|
||||
private Label[] labels;
|
||||
private final Metapb.Peer peer;
|
||||
private final Metapb.Store store;
|
||||
private final boolean isLeader;
|
||||
|
||||
Store(
|
||||
final org.tikv.kvproto.Metapb.Peer peer,
|
||||
final org.tikv.kvproto.Metapb.Store store,
|
||||
boolean isLeader) {
|
||||
this.peer = peer;
|
||||
this.store = store;
|
||||
this.isLeader = isLeader;
|
||||
}
|
||||
|
||||
public Metapb.Peer getPeer() {
|
||||
return peer;
|
||||
}
|
||||
|
||||
public Label[] getLabels() {
|
||||
if (labels == null) {
|
||||
List<Metapb.StoreLabel> labelList = store.getLabelsList();
|
||||
if (labelList.isEmpty()) {
|
||||
labels = EMPTY_LABELS;
|
||||
} else {
|
||||
labels = labelList.stream().map(Label::new).toArray(Label[]::new);
|
||||
}
|
||||
}
|
||||
return labels;
|
||||
}
|
||||
|
||||
public boolean isLearner() {
|
||||
return peer.getRole() == Metapb.PeerRole.Learner;
|
||||
}
|
||||
|
||||
public boolean isLeader() {
|
||||
return isLeader;
|
||||
}
|
||||
|
||||
public boolean isFollower() {
|
||||
return peer.getRole() == Metapb.PeerRole.Voter && !isLeader;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return store.getId();
|
||||
}
|
||||
|
||||
public String getAddress() {
|
||||
return store.getAddress();
|
||||
}
|
||||
|
||||
public String getVersion() {
|
||||
return store.getVersion();
|
||||
}
|
||||
|
||||
public State getState() {
|
||||
switch (store.getState()) {
|
||||
case Up:
|
||||
return State.Up;
|
||||
case Offline:
|
||||
return State.Offline;
|
||||
case Tombstone:
|
||||
return State.Tombstone;
|
||||
default:
|
||||
return State.Unknown;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean equals(Object o) {
|
||||
if (!(o instanceof Store)) {
|
||||
return false;
|
||||
}
|
||||
Store other = (Store) o;
|
||||
return this.peer.equals(other.peer);
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return toStringHelper(this).add("peer", peer).add("store", store).toString();
|
||||
}
|
||||
}
|
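The Region/Store pair above is what a ReplicaSelector gets to work with. An illustrative custom selector, not part of this change set, that prefers replicas carrying a matching "zone" label and keeps the leader as the final fallback.

package org.tikv.common.replica;

import java.util.ArrayList;
import java.util.List;

// Illustrative only: label-aware replica selection built on the API shown above.
public class ZoneReplicaSelector implements ReplicaSelector {
  private final String localZone;

  public ZoneReplicaSelector(String localZone) {
    this.localZone = localZone;
  }

  @Override
  public List<Store> select(Region region) {
    List<Store> list = new ArrayList<>();
    for (Store store : region.getStores()) {
      if (store.isLearner() || store.isLeader()) {
        continue; // learners never serve reads here; the leader is appended last
      }
      for (Store.Label label : store.getLabels()) {
        if ("zone".equals(label.getKey()) && localZone.equals(label.getValue())) {
          list.add(store);
        }
      }
    }
    list.add(region.getLeader()); // always keep the leader as the fallback
    return list;
  }
}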
|
@ -56,7 +56,7 @@ public class Batch {
|
|||
}
|
||||
|
||||
public BackOffer getBackOffer() {
|
||||
return backOffer;
|
||||
return ConcreteBackOffer.create(backOffer);
|
||||
}
|
||||
|
||||
public TiRegion getRegion() {
|
||||
|
|
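getBackOffer now returns a copy built by ConcreteBackOffer.create rather than the shared instance. A short hedged sketch of the intent: each retried batch works on an independent back-off budget instead of mutating shared state.

  // Illustrative: a per-batch copy, so one slow batch cannot exhaust its siblings' retry budget.
  BackOffer shared = ConcreteBackOffer.newRawKVBackOff();
  BackOffer perBatch = ConcreteBackOffer.create(shared); // copy, not the same instance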
|
@ -121,7 +121,7 @@ public class ClientUtils {
|
|||
|
||||
public static Map<TiRegion, List<ByteString>> groupKeysByRegion(
|
||||
RegionManager regionManager, List<ByteString> keys, BackOffer backoffer) {
|
||||
return groupKeysByRegion(regionManager, new ArrayList<>(keys), backoffer, false);
|
||||
return groupKeysByRegion(regionManager, keys, backoffer, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -102,10 +102,10 @@ public class ConcreteBackOffer implements BackOffer {
|
|||
backOffFunction = BackOffFunction.create(200, 3000, BackOffStrategy.EqualJitter);
|
||||
break;
|
||||
case BoPDRPC:
|
||||
backOffFunction = BackOffFunction.create(500, 3000, BackOffStrategy.EqualJitter);
|
||||
backOffFunction = BackOffFunction.create(100, 600, BackOffStrategy.EqualJitter);
|
||||
break;
|
||||
case BoTiKVRPC:
|
||||
backOffFunction = BackOffFunction.create(100, 2000, BackOffStrategy.EqualJitter);
|
||||
backOffFunction = BackOffFunction.create(100, 400, BackOffStrategy.EqualJitter);
|
||||
break;
|
||||
case BoTxnNotFound:
|
||||
backOffFunction = BackOffFunction.create(2, 500, BackOffStrategy.NoJitter);
|
||||
|
|
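The hunk above only tightens the base and cap passed to BackOffFunction.create(base, cap, EqualJitter). For reference, a minimal sketch of the standard equal-jitter computation; this is the textbook formulation, not a copy of the library's internals.

import java.util.concurrent.ThreadLocalRandom;

// Equal jitter: half of the exponential window is fixed, half is random.
static long equalJitterSleepMs(long baseMs, long capMs, int attempt) {
  long window = Math.min(capMs, baseMs * (1L << Math.min(attempt, 30)));
  return window / 2 + ThreadLocalRandom.current().nextLong(window / 2 + 1);
}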
|
@ -29,9 +29,9 @@ import org.tikv.common.key.RowKey;
|
|||
import org.tikv.common.pd.PDUtils;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.region.TiStoreType;
|
||||
import org.tikv.kvproto.Coprocessor.KeyRange;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class RangeSplitter {
|
||||
private final RegionManager regionManager;
|
||||
|
@ -51,12 +51,11 @@ public class RangeSplitter {
|
|||
* @param handles Handle list
|
||||
* @return <Region, HandleList> map
|
||||
*/
|
||||
public Map<Pair<TiRegion, Metapb.Store>, TLongArrayList> groupByAndSortHandlesByRegionId(
|
||||
public Map<Pair<TiRegion, TiStore>, TLongArrayList> groupByAndSortHandlesByRegionId(
|
||||
long tableId, TLongArrayList handles) {
|
||||
TLongObjectHashMap<TLongArrayList> regionHandles = new TLongObjectHashMap<>();
|
||||
TLongObjectHashMap<Pair<TiRegion, Metapb.Store>> idToRegionStorePair =
|
||||
new TLongObjectHashMap<>();
|
||||
Map<Pair<TiRegion, Metapb.Store>, TLongArrayList> result = new HashMap<>();
|
||||
TLongObjectHashMap<Pair<TiRegion, TiStore>> idToRegionStorePair = new TLongObjectHashMap<>();
|
||||
Map<Pair<TiRegion, TiStore>, TLongArrayList> result = new HashMap<>();
|
||||
handles.sort();
|
||||
|
||||
byte[] endKey = null;
|
||||
|
@ -71,7 +70,7 @@ public class RangeSplitter {
|
|||
regionHandles.put(curRegion.getId(), handlesInCurRegion);
|
||||
handlesInCurRegion = new TLongArrayList();
|
||||
}
|
||||
Pair<TiRegion, Metapb.Store> regionStorePair =
|
||||
Pair<TiRegion, TiStore> regionStorePair =
|
||||
regionManager.getRegionStorePairByKey(ByteString.copyFrom(key.getBytes()));
|
||||
curRegion = regionStorePair.first;
|
||||
idToRegionStorePair.put(curRegion.getId(), regionStorePair);
|
||||
|
@ -84,7 +83,7 @@ public class RangeSplitter {
|
|||
}
|
||||
regionHandles.forEachEntry(
|
||||
(k, v) -> {
|
||||
Pair<TiRegion, Metapb.Store> regionStorePair = idToRegionStorePair.get(k);
|
||||
Pair<TiRegion, TiStore> regionStorePair = idToRegionStorePair.get(k);
|
||||
result.put(regionStorePair, v);
|
||||
return true;
|
||||
});
|
||||
|
@ -110,7 +109,7 @@ public class RangeSplitter {
|
|||
// Max value for current index handle range
|
||||
ImmutableList.Builder<RegionTask> regionTasks = ImmutableList.builder();
|
||||
|
||||
Map<Pair<TiRegion, Metapb.Store>, TLongArrayList> regionHandlesMap =
|
||||
Map<Pair<TiRegion, TiStore>, TLongArrayList> regionHandlesMap =
|
||||
groupByAndSortHandlesByRegionId(tableId, handles);
|
||||
|
||||
regionHandlesMap.forEach((k, v) -> createTask(0, v.size(), tableId, v, k, regionTasks));
|
||||
|
@ -123,7 +122,7 @@ public class RangeSplitter {
|
|||
int endPos,
|
||||
long tableId,
|
||||
TLongArrayList handles,
|
||||
Pair<TiRegion, Metapb.Store> regionStorePair,
|
||||
Pair<TiRegion, TiStore> regionStorePair,
|
||||
ImmutableList.Builder<RegionTask> regionTasks) {
|
||||
List<KeyRange> newKeyRanges = new ArrayList<>(endPos - startPos + 1);
|
||||
long startHandle = handles.get(startPos);
|
||||
|
@ -163,10 +162,10 @@ public class RangeSplitter {
|
|||
int i = 0;
|
||||
KeyRange range = keyRanges.get(i++);
|
||||
Map<Long, List<KeyRange>> idToRange = new HashMap<>(); // region id to keyRange list
|
||||
Map<Long, Pair<TiRegion, Metapb.Store>> idToRegion = new HashMap<>();
|
||||
Map<Long, Pair<TiRegion, TiStore>> idToRegion = new HashMap<>();
|
||||
|
||||
while (true) {
|
||||
Pair<TiRegion, Metapb.Store> regionStorePair =
|
||||
Pair<TiRegion, TiStore> regionStorePair =
|
||||
regionManager.getRegionStorePairByKey(range.getStart(), storeType);
|
||||
|
||||
if (regionStorePair == null) {
|
||||
|
@ -203,7 +202,7 @@ public class RangeSplitter {
|
|||
ImmutableList.Builder<RegionTask> resultBuilder = ImmutableList.builder();
|
||||
idToRange.forEach(
|
||||
(k, v) -> {
|
||||
Pair<TiRegion, Metapb.Store> regionStorePair = idToRegion.get(k);
|
||||
Pair<TiRegion, TiStore> regionStorePair = idToRegion.get(k);
|
||||
resultBuilder.add(new RegionTask(regionStorePair.first, regionStorePair.second, v));
|
||||
});
|
||||
return resultBuilder.build();
|
||||
|
@ -221,24 +220,23 @@ public class RangeSplitter {
|
|||
|
||||
public static class RegionTask implements Serializable {
|
||||
private final TiRegion region;
|
||||
private final Metapb.Store store;
|
||||
private final TiStore store;
|
||||
private final List<KeyRange> ranges;
|
||||
private final String host;
|
||||
|
||||
RegionTask(TiRegion region, Metapb.Store store, List<KeyRange> ranges) {
|
||||
RegionTask(TiRegion region, TiStore store, List<KeyRange> ranges) {
|
||||
this.region = region;
|
||||
this.store = store;
|
||||
this.ranges = ranges;
|
||||
String host = null;
|
||||
try {
|
||||
host = PDUtils.addrToUri(store.getAddress()).getHost();
|
||||
host = PDUtils.addrToUri(store.getStore().getAddress()).getHost();
|
||||
} catch (Exception ignored) {
|
||||
}
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public static RegionTask newInstance(
|
||||
TiRegion region, Metapb.Store store, List<KeyRange> ranges) {
|
||||
public static RegionTask newInstance(TiRegion region, TiStore store, List<KeyRange> ranges) {
|
||||
return new RegionTask(region, store, ranges);
|
||||
}
|
||||
|
||||
|
@ -246,7 +244,7 @@ public class RangeSplitter {
|
|||
return region;
|
||||
}
|
||||
|
||||
public Metapb.Store getStore() {
|
||||
public TiStore getStore() {
|
||||
return store;
|
||||
}
|
||||
|
||||
|
|
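With the RangeSplitter signatures moved from Metapb.Store to TiStore, callers now receive the wrapper in each region/store pair. A hedged usage fragment; the newSplitter factory name is assumed, and regionManager, tableId and handles are assumed to be in scope.

  // Illustrative: group sorted handles by region and inspect the owning store.
  RangeSplitter splitter = RangeSplitter.newSplitter(regionManager); // factory name assumed
  Map<Pair<TiRegion, TiStore>, TLongArrayList> grouped =
      splitter.groupByAndSortHandlesByRegionId(tableId, handles);
  grouped.forEach(
      (pair, handleList) ->
          System.out.printf(
              "region %d on %s gets %d handles%n",
              pair.first.getId(), pair.second.getAddress(), handleList.size()));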
|
@ -112,6 +112,21 @@ public class RawKVClient implements AutoCloseable {
|
|||
* @param ttl the ttl of the key (in seconds), 0 means the key will never be outdated
|
||||
*/
|
||||
public void put(ByteString key, ByteString value, long ttl) {
|
||||
put(key, value, ttl, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Put a raw key-value pair to TiKV. This API is atomic.
|
||||
*
|
||||
* @param key raw key
|
||||
* @param value raw value
|
||||
* @param ttl the ttl of the key (in seconds), 0 means the key will never be outdated
|
||||
*/
|
||||
public void putAtomic(ByteString key, ByteString value, long ttl) {
|
||||
put(key, value, ttl, true);
|
||||
}
|
||||
|
||||
private void put(ByteString key, ByteString value, long ttl, boolean atomic) {
|
||||
String label = "client_raw_put";
|
||||
Histogram.Timer requestTimer = RAW_REQUEST_LATENCY.labels(label).startTimer();
|
||||
try {
|
||||
|
@ -119,7 +134,7 @@ public class RawKVClient implements AutoCloseable {
|
|||
while (true) {
|
||||
RegionStoreClient client = clientBuilder.build(key);
|
||||
try {
|
||||
client.rawPut(backOffer, key, value, ttl);
|
||||
client.rawPut(backOffer, key, value, ttl, atomic);
|
||||
RAW_REQUEST_SUCCESS.labels(label).inc();
|
||||
return;
|
||||
} catch (final TiKVException e) {
|
||||
|
@ -220,7 +235,7 @@ public class RawKVClient implements AutoCloseable {
|
|||
String label = "client_raw_batch_put";
|
||||
Histogram.Timer requestTimer = RAW_REQUEST_LATENCY.labels(label).startTimer();
|
||||
try {
|
||||
doSendBatchPut(ConcreteBackOffer.newRawKVBackOff(), kvPairs, ttl, atomic);
|
||||
doSendBatchPut(defaultBackOff(), kvPairs, ttl, atomic);
|
||||
RAW_REQUEST_SUCCESS.labels(label).inc();
|
||||
} catch (Exception e) {
|
||||
RAW_REQUEST_FAILURE.labels(label).inc();
|
||||
|
@ -528,6 +543,19 @@ public class RawKVClient implements AutoCloseable {
|
|||
* @param key raw key to be deleted
|
||||
*/
|
||||
public void delete(ByteString key) {
|
||||
delete(key, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a raw key-value pair from TiKV if key exists. This API is atomic.
|
||||
*
|
||||
* @param key raw key to be deleted
|
||||
*/
|
||||
public void deleteAtomic(ByteString key) {
|
||||
delete(key, true);
|
||||
}
|
||||
|
||||
private void delete(ByteString key, boolean atomic) {
|
||||
String label = "client_raw_delete";
|
||||
Histogram.Timer requestTimer = RAW_REQUEST_LATENCY.labels(label).startTimer();
|
||||
try {
|
||||
|
@ -535,7 +563,7 @@ public class RawKVClient implements AutoCloseable {
|
|||
while (true) {
|
||||
RegionStoreClient client = clientBuilder.build(key);
|
||||
try {
|
||||
client.rawDelete(defaultBackOff(), key);
|
||||
client.rawDelete(defaultBackOff(), key, atomic);
|
||||
RAW_REQUEST_SUCCESS.labels(label).inc();
|
||||
return;
|
||||
} catch (final TiKVException e) {
|
||||
|
@ -622,12 +650,13 @@ public class RawKVClient implements AutoCloseable {
|
|||
private List<Batch> doSendBatchPutInBatchesWithRetry(
|
||||
BackOffer backOffer, Batch batch, long ttl, boolean atomic) {
|
||||
try (RegionStoreClient client = clientBuilder.build(batch.getRegion())) {
|
||||
client.setTimeout(conf.getScanTimeout());
|
||||
client.rawBatchPut(backOffer, batch, ttl, atomic);
|
||||
return new ArrayList<>();
|
||||
} catch (final TiKVException e) {
|
||||
// TODO: any elegant way to re-split the ranges if fails?
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
|
||||
logger.warn("ReSplitting ranges for BatchPutRequest");
|
||||
logger.debug("ReSplitting ranges for BatchPutRequest");
|
||||
// retry
|
||||
return doSendBatchPutWithRefetchRegion(backOffer, batch);
|
||||
}
|
||||
|
@ -685,7 +714,7 @@ public class RawKVClient implements AutoCloseable {
|
|||
} catch (final TiKVException e) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
|
||||
clientBuilder.getRegionManager().invalidateRegion(batch.getRegion());
|
||||
logger.warn("ReSplitting ranges for BatchGetRequest", e);
|
||||
logger.debug("ReSplitting ranges for BatchGetRequest", e);
|
||||
|
||||
// retry
|
||||
return Pair.create(doSendBatchGetWithRefetchRegion(backOffer, batch), new ArrayList<>());
|
||||
|
@ -726,7 +755,7 @@ public class RawKVClient implements AutoCloseable {
|
|||
} catch (final TiKVException e) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
|
||||
clientBuilder.getRegionManager().invalidateRegion(batch.getRegion());
|
||||
logger.warn("ReSplitting ranges for BatchGetRequest", e);
|
||||
logger.debug("ReSplitting ranges for BatchGetRequest", e);
|
||||
|
||||
// retry
|
||||
return doSendBatchDeleteWithRefetchRegion(backOffer, batch);
|
||||
|
@ -776,7 +805,7 @@ public class RawKVClient implements AutoCloseable {
|
|||
} catch (final TiKVException e) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
|
||||
clientBuilder.getRegionManager().invalidateRegion(range.getRegion());
|
||||
logger.warn("ReSplitting ranges for BatchDeleteRangeRequest", e);
|
||||
logger.debug("ReSplitting ranges for BatchDeleteRangeRequest", e);
|
||||
|
||||
// retry
|
||||
return doSendDeleteRangeWithRefetchRegion(backOffer, range);
|
||||
|
@ -810,7 +839,9 @@ public class RawKVClient implements AutoCloseable {
|
|||
private List<TiRegion> fetchRegionsFromRange(
|
||||
BackOffer backOffer, ByteString startKey, ByteString endKey) {
|
||||
List<TiRegion> regions = new ArrayList<>();
|
||||
while (startKey.isEmpty() || Key.toRawKey(startKey).compareTo(Key.toRawKey(endKey)) < 0) {
|
||||
while (startKey.isEmpty()
|
||||
|| endKey.isEmpty()
|
||||
|| Key.toRawKey(startKey).compareTo(Key.toRawKey(endKey)) < 0) {
|
||||
TiRegion currentRegion = clientBuilder.getRegionManager().getRegionByKey(startKey, backOffer);
|
||||
regions.add(currentRegion);
|
||||
startKey = currentRegion.getEndKey();
|
||||
|
@ -835,6 +866,6 @@ public class RawKVClient implements AutoCloseable {
|
|||
}
|
||||
|
||||
private BackOffer defaultBackOff() {
|
||||
return ConcreteBackOffer.newRawKVBackOff();
|
||||
return ConcreteBackOffer.newCustomBackOff(conf.getRawKVDefaultBackoffInMS());
|
||||
}
|
||||
}
|
||||
|
|
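putAtomic and deleteAtomic route the existing put/delete paths through the CAS-capable requests (setForCas(true)). A hedged usage sketch; the session bootstrap shown here is an assumption about the surrounding API, not part of this diff.

  // Illustrative usage of the new atomic raw APIs.
  try (TiSession session = TiSession.create(TiConfiguration.createRawDefault("127.0.0.1:2379"));
      RawKVClient client = session.createRawClient()) {
    ByteString key = ByteString.copyFromUtf8("k");
    client.putAtomic(key, ByteString.copyFromUtf8("v"), 0); // ttl 0 = never expires
    client.deleteAtomic(key);                               // CAS-backed delete
  }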
|
@ -26,6 +26,7 @@ import org.tikv.common.exception.KeyException;
|
|||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
|
@ -66,22 +67,23 @@ public interface AbstractLockResolverClient {
|
|||
}
|
||||
|
||||
static AbstractLockResolverClient getInstance(
|
||||
String storeVersion,
|
||||
TiConfiguration conf,
|
||||
TiRegion region,
|
||||
TiStore store,
|
||||
TikvGrpc.TikvBlockingStub blockingStub,
|
||||
TikvGrpc.TikvStub asyncStub,
|
||||
ChannelFactory channelFactory,
|
||||
RegionManager regionManager,
|
||||
PDClient pdClient,
|
||||
RegionStoreClient.RegionStoreClientBuilder clientBuilder) {
|
||||
if (StoreVersion.compareTo(storeVersion, Version.RESOLVE_LOCK_V3) < 0) {
|
||||
if (StoreVersion.compareTo(store.getStore().getVersion(), Version.RESOLVE_LOCK_V3) < 0) {
|
||||
return new LockResolverClientV2(
|
||||
conf, region, blockingStub, asyncStub, channelFactory, regionManager);
|
||||
} else if (StoreVersion.compareTo(storeVersion, Version.RESOLVE_LOCK_V4) < 0) {
|
||||
conf, region, store, blockingStub, asyncStub, channelFactory, regionManager);
|
||||
} else if (StoreVersion.compareTo(store.getStore().getVersion(), Version.RESOLVE_LOCK_V4) < 0) {
|
||||
return new LockResolverClientV3(
|
||||
conf,
|
||||
region,
|
||||
store,
|
||||
blockingStub,
|
||||
asyncStub,
|
||||
channelFactory,
|
||||
|
@ -92,6 +94,7 @@ public interface AbstractLockResolverClient {
|
|||
return new LockResolverClientV4(
|
||||
conf,
|
||||
region,
|
||||
store,
|
||||
blockingStub,
|
||||
asyncStub,
|
||||
channelFactory,
|
||||
|
|
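getInstance now derives the protocol version from store.getStore().getVersion() instead of taking it as a parameter. The same dispatch, spelled out as a hedged sketch with the comparisons made explicit; the comments describe the branches, not exact TiKV release numbers.

  // Illustrative: version-based choice of lock resolver protocol.
  String v = store.getStore().getVersion();
  if (StoreVersion.compareTo(v, Version.RESOLVE_LOCK_V3) < 0) {
    // older than RESOLVE_LOCK_V3: use the V2 resolver
  } else if (StoreVersion.compareTo(v, Version.RESOLVE_LOCK_V4) < 0) {
    // older than RESOLVE_LOCK_V4: use the V3 resolver
  } else {
    // RESOLVE_LOCK_V4 or newer: use the V4 resolver
  }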
|
@ -1,62 +0,0 @@
|
|||
/*
|
||||
* Copyright 2017 PingCAP, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.tikv.txn;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class BatchKeys {
|
||||
private final TiRegion region;
|
||||
private final Metapb.Store store;
|
||||
private List<ByteString> keys;
|
||||
private final int sizeInBytes;
|
||||
|
||||
public BatchKeys(
|
||||
TiRegion region, Metapb.Store store, List<ByteString> keysInput, int sizeInBytes) {
|
||||
this.region = region;
|
||||
this.store = store;
|
||||
this.keys = new ArrayList<>();
|
||||
this.keys.addAll(keysInput);
|
||||
this.sizeInBytes = sizeInBytes;
|
||||
}
|
||||
|
||||
public List<ByteString> getKeys() {
|
||||
return keys;
|
||||
}
|
||||
|
||||
public void setKeys(List<ByteString> keys) {
|
||||
this.keys = keys;
|
||||
}
|
||||
|
||||
public TiRegion getRegion() {
|
||||
return region;
|
||||
}
|
||||
|
||||
public Metapb.Store getStore() {
|
||||
return store;
|
||||
}
|
||||
|
||||
public int getSizeInBytes() {
|
||||
return sizeInBytes;
|
||||
}
|
||||
|
||||
public float getSizeInKB() {
|
||||
return ((float) sizeInBytes) / 1024;
|
||||
}
|
||||
}
|
|
@ -1,52 +0,0 @@
|
|||
/*
|
||||
* Copyright 2019 The TiKV Project Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.tikv.txn;
|
||||
|
||||
public class ClientRPCResult {
|
||||
private boolean success;
|
||||
private boolean retry;
|
||||
private Exception exception;
|
||||
|
||||
public ClientRPCResult(boolean success, boolean retry, Exception exception) {
|
||||
this.success = success;
|
||||
this.retry = retry;
|
||||
this.exception = exception;
|
||||
}
|
||||
|
||||
public boolean isSuccess() {
|
||||
return success;
|
||||
}
|
||||
|
||||
public void setSuccess(boolean success) {
|
||||
this.success = success;
|
||||
}
|
||||
|
||||
public boolean isRetry() {
|
||||
return retry;
|
||||
}
|
||||
|
||||
public void setRetry(boolean retry) {
|
||||
this.retry = retry;
|
||||
}
|
||||
|
||||
public Exception getException() {
|
||||
return exception;
|
||||
}
|
||||
|
||||
public void setException(Exception exception) {
|
||||
this.exception = exception;
|
||||
}
|
||||
}
|
|
@ -1,41 +0,0 @@
|
|||
/*
|
||||
* Copyright 2017 PingCAP, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.tikv.txn;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class GroupKeyResult {
|
||||
|
||||
private Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupsResult;
|
||||
|
||||
public GroupKeyResult() {
|
||||
this.groupsResult = new HashMap<>();
|
||||
}
|
||||
|
||||
public Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> getGroupsResult() {
|
||||
return groupsResult;
|
||||
}
|
||||
|
||||
public void setGroupsResult(Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupsResult) {
|
||||
this.groupsResult = groupsResult;
|
||||
}
|
||||
}
|
|
@ -42,6 +42,7 @@ import org.tikv.common.region.AbstractRegionStoreClient;
|
|||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiRegion.RegionVerID;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
import org.tikv.common.util.TsoUtils;
|
||||
|
@ -74,11 +75,12 @@ public class LockResolverClientV2 extends AbstractRegionStoreClient
|
|||
public LockResolverClientV2(
|
||||
TiConfiguration conf,
|
||||
TiRegion region,
|
||||
TiStore store,
|
||||
TikvBlockingStub blockingStub,
|
||||
TikvStub asyncStub,
|
||||
ChannelFactory channelFactory,
|
||||
RegionManager regionManager) {
|
||||
super(conf, region, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
super(conf, region, store, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
resolved = new HashMap<>();
|
||||
recentResolved = new LinkedList<>();
|
||||
readWriteLock = new ReentrantReadWriteLock();
|
||||
|
@ -125,7 +127,7 @@ public class LockResolverClientV2 extends AbstractRegionStoreClient
|
|||
Supplier<CleanupRequest> factory =
|
||||
() ->
|
||||
CleanupRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(region.getLeaderContext())
|
||||
.setKey(primary)
|
||||
.setStartVersion(txnID)
|
||||
.build();
|
||||
|
@ -232,7 +234,7 @@ public class LockResolverClientV2 extends AbstractRegionStoreClient
|
|||
factory =
|
||||
() ->
|
||||
ResolveLockRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(region.getLeaderContext())
|
||||
.setStartVersion(lock.getTxnID())
|
||||
.setCommitVersion(txnStatus)
|
||||
.build();
|
||||
|
@ -240,7 +242,7 @@ public class LockResolverClientV2 extends AbstractRegionStoreClient
|
|||
factory =
|
||||
() ->
|
||||
ResolveLockRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(region.getLeaderContext())
|
||||
.setStartVersion(lock.getTxnID())
|
||||
.build();
|
||||
}
|
||||
|
|
|
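
The recurring edit in LockResolverClientV2 replaces region.getContext() with region.getLeaderContext() when building RPC requests, so lock-resolution requests always carry the leader peer's context. A hedged sketch of the pattern, with names taken from the hunk above:

// Sketch: build the cleanup request against the region's leader context.
// primary and txnID come from the surrounding resolver code.
Supplier<CleanupRequest> factory =
    () ->
        CleanupRequest.newBuilder()
            .setContext(region.getLeaderContext()) // previously region.getContext()
            .setKey(primary)
            .setStartVersion(txnID)
            .build();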
@ -39,10 +39,7 @@ import org.tikv.common.exception.KeyException;
|
|||
import org.tikv.common.exception.RegionException;
|
||||
import org.tikv.common.exception.TiClientInternalException;
|
||||
import org.tikv.common.operation.KVErrorHandler;
|
||||
import org.tikv.common.region.AbstractRegionStoreClient;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.*;
|
||||
import org.tikv.common.region.TiRegion.RegionVerID;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
|
@ -79,13 +76,14 @@ public class LockResolverClientV3 extends AbstractRegionStoreClient
|
|||
public LockResolverClientV3(
|
||||
TiConfiguration conf,
|
||||
TiRegion region,
|
||||
TiStore store,
|
||||
TikvBlockingStub blockingStub,
|
||||
TikvStub asyncStub,
|
||||
ChannelFactory channelFactory,
|
||||
RegionManager regionManager,
|
||||
PDClient pdClient,
|
||||
RegionStoreClient.RegionStoreClientBuilder clientBuilder) {
|
||||
super(conf, region, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
super(conf, region, store, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
resolved = new HashMap<>();
|
||||
recentResolved = new LinkedList<>();
|
||||
readWriteLock = new ReentrantReadWriteLock();
|
||||
|
@ -151,7 +149,7 @@ public class LockResolverClientV3 extends AbstractRegionStoreClient
|
|||
|
||||
Kvrpcpb.ResolveLockRequest.Builder builder =
|
||||
Kvrpcpb.ResolveLockRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(region.getLeaderContext())
|
||||
.setStartVersion(lock.getTxnID());
|
||||
|
||||
if (txnStatus.isCommitted()) {
|
||||
|
@ -230,7 +228,7 @@ public class LockResolverClientV3 extends AbstractRegionStoreClient
|
|||
() -> {
|
||||
TiRegion primaryKeyRegion = regionManager.getRegionByKey(primary);
|
||||
return CleanupRequest.newBuilder()
|
||||
.setContext(primaryKeyRegion.getContext())
|
||||
.setContext(primaryKeyRegion.getLeaderContext())
|
||||
.setKey(primary)
|
||||
.setStartVersion(txnID)
|
||||
.setCurrentTs(currentTS)
|
||||
|
|
|
@ -39,10 +39,7 @@ import org.tikv.common.exception.KeyException;
|
|||
import org.tikv.common.exception.RegionException;
|
||||
import org.tikv.common.exception.TiClientInternalException;
|
||||
import org.tikv.common.operation.KVErrorHandler;
|
||||
import org.tikv.common.region.AbstractRegionStoreClient;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.*;
|
||||
import org.tikv.common.region.TiRegion.RegionVerID;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ChannelFactory;
|
||||
|
@ -79,13 +76,14 @@ public class LockResolverClientV4 extends AbstractRegionStoreClient
|
|||
public LockResolverClientV4(
|
||||
TiConfiguration conf,
|
||||
TiRegion region,
|
||||
TiStore store,
|
||||
TikvBlockingStub blockingStub,
|
||||
TikvStub asyncStub,
|
||||
ChannelFactory channelFactory,
|
||||
RegionManager regionManager,
|
||||
PDClient pdClient,
|
||||
RegionStoreClient.RegionStoreClientBuilder clientBuilder) {
|
||||
super(conf, region, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
super(conf, region, store, channelFactory, blockingStub, asyncStub, regionManager);
|
||||
resolved = new HashMap<>();
|
||||
recentResolved = new LinkedList<>();
|
||||
readWriteLock = new ReentrantReadWriteLock();
|
||||
|
@ -169,7 +167,7 @@ public class LockResolverClientV4 extends AbstractRegionStoreClient
|
|||
Supplier<Kvrpcpb.PessimisticRollbackRequest> factory =
|
||||
() ->
|
||||
Kvrpcpb.PessimisticRollbackRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(region.getLeaderContext())
|
||||
.setStartVersion(lock.getTxnID())
|
||||
.setForUpdateTs(forUpdateTS)
|
||||
.addKeys(lock.getKey())
|
||||
|
@ -287,7 +285,7 @@ public class LockResolverClientV4 extends AbstractRegionStoreClient
|
|||
() -> {
|
||||
TiRegion primaryKeyRegion = regionManager.getRegionByKey(primary);
|
||||
return Kvrpcpb.CheckTxnStatusRequest.newBuilder()
|
||||
.setContext(primaryKeyRegion.getContext())
|
||||
.setContext(primaryKeyRegion.getLeaderContext())
|
||||
.setPrimaryKey(primary)
|
||||
.setLockTs(txnID)
|
||||
.setCallerStartTs(callerStartTS)
|
||||
|
@ -364,7 +362,7 @@ public class LockResolverClientV4 extends AbstractRegionStoreClient
|
|||
|
||||
Kvrpcpb.ResolveLockRequest.Builder builder =
|
||||
Kvrpcpb.ResolveLockRequest.newBuilder()
|
||||
.setContext(region.getContext())
|
||||
.setContext(region.getLeaderContext())
|
||||
.setStartVersion(lock.getTxnID());
|
||||
|
||||
if (txnStatus.isCommitted()) {
|
||||
|
|
|
@ -30,11 +30,11 @@ import org.tikv.common.exception.TiBatchWriteException;
import org.tikv.common.meta.TiTimestamp;
import org.tikv.common.region.RegionManager;
import org.tikv.common.region.TiRegion;
import org.tikv.common.region.TiStore;
import org.tikv.common.util.BackOffFunction;
import org.tikv.common.util.BackOffer;
import org.tikv.common.util.ConcreteBackOffer;
import org.tikv.common.util.Pair;
import org.tikv.kvproto.Metapb;
import org.tikv.txn.type.ClientRPCResult;

/**

@ -105,9 +105,9 @@ public class TTLManager {
  }

  private void sendTxnHeartBeat(BackOffer bo, long ttl) {
    Pair<TiRegion, Metapb.Store> pair = regionManager.getRegionStorePairByKey(primaryLock);
    Pair<TiRegion, TiStore> pair = regionManager.getRegionStorePairByKey(primaryLock);
    TiRegion tiRegion = pair.first;
    Metapb.Store store = pair.second;
    TiStore store = pair.second;

    ClientRPCResult result = kvClient.txnHeartBeat(bo, primaryLock, startTS, ttl, tiRegion, store);

@ -121,7 +121,7 @@ public class TTLManager {
            new GrpcException(
                String.format("sendTxnHeartBeat failed, regionId=%s", tiRegion.getId()),
                result.getException()));
    this.regionManager.invalidateStore(store.getId());
    this.regionManager.invalidateStore(store.getStore().getId());
    this.regionManager.invalidateRegion(tiRegion);
    // re-split keys and commit again.
    sendTxnHeartBeat(bo, ttl);
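
A hedged sketch of the heartbeat path after the TiStore migration: on a retryable failure the cached store and region are invalidated and the heartbeat is retried. The success/retry checks are assumptions; the lookup, invalidation and retry calls mirror the hunk above.

// Fragment of TTLManager; fields such as kvClient, regionManager, primaryLock
// and startTS are assumed from the surrounding class.
private void sendTxnHeartBeat(BackOffer bo, long ttl) {
  Pair<TiRegion, TiStore> pair = regionManager.getRegionStorePairByKey(primaryLock);
  TiRegion tiRegion = pair.first;
  TiStore store = pair.second;

  ClientRPCResult result = kvClient.txnHeartBeat(bo, primaryLock, startTS, ttl, tiRegion, store);
  if (result.isSuccess()) {
    return;
  }
  if (result.isRetry()) {
    // Back off, drop the stale cache entries, then retry with a fresh region/store pair.
    bo.doBackOff(
        BackOffFunction.BackOffFuncType.BoRegionMiss,
        new GrpcException(
            String.format("sendTxnHeartBeat failed, regionId=%s", tiRegion.getId()),
            result.getException()));
    regionManager.invalidateStore(store.getStore().getId());
    regionManager.invalidateRegion(tiRegion);
    sendTxnHeartBeat(bo, ttl);
  }
}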
@ -38,13 +38,13 @@ import org.tikv.common.exception.GrpcException;
|
|||
import org.tikv.common.exception.TiBatchWriteException;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.BackOffFunction;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
import org.tikv.kvproto.Kvrpcpb.Op;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.txn.type.BatchKeys;
|
||||
import org.tikv.txn.type.ClientRPCResult;
|
||||
import org.tikv.txn.type.GroupKeyResult;
|
||||
|
@ -146,9 +146,9 @@ public class TwoPhaseCommitter {
|
|||
|
||||
private void doPrewritePrimaryKeyWithRetry(BackOffer backOffer, ByteString key, ByteString value)
|
||||
throws TiBatchWriteException {
|
||||
Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key, backOffer);
|
||||
Pair<TiRegion, TiStore> pair = this.regionManager.getRegionStorePairByKey(key, backOffer);
|
||||
TiRegion tiRegion = pair.first;
|
||||
Metapb.Store store = pair.second;
|
||||
TiStore store = pair.second;
|
||||
|
||||
Kvrpcpb.Mutation mutation;
|
||||
if (!value.isEmpty()) {
|
||||
|
@ -201,9 +201,9 @@ public class TwoPhaseCommitter {
|
|||
|
||||
private void doCommitPrimaryKeyWithRetry(BackOffer backOffer, ByteString key, long commitTs)
|
||||
throws TiBatchWriteException {
|
||||
Pair<TiRegion, Metapb.Store> pair = this.regionManager.getRegionStorePairByKey(key, backOffer);
|
||||
Pair<TiRegion, TiStore> pair = this.regionManager.getRegionStorePairByKey(key, backOffer);
|
||||
TiRegion tiRegion = pair.first;
|
||||
Metapb.Store store = pair.second;
|
||||
TiStore store = pair.second;
|
||||
ByteString[] keys = new ByteString[] {key};
|
||||
|
||||
// send rpc request to tikv server
|
||||
|
@ -335,11 +335,11 @@ public class TwoPhaseCommitter {
|
|||
// groups keys by region
|
||||
GroupKeyResult groupResult = this.groupKeysByRegion(keys, size, backOffer);
|
||||
List<BatchKeys> batchKeyList = new LinkedList<>();
|
||||
Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult();
|
||||
Map<Pair<TiRegion, TiStore>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult();
|
||||
|
||||
for (Map.Entry<Pair<TiRegion, Metapb.Store>, List<ByteString>> entry : groupKeyMap.entrySet()) {
|
||||
for (Map.Entry<Pair<TiRegion, TiStore>, List<ByteString>> entry : groupKeyMap.entrySet()) {
|
||||
TiRegion tiRegion = entry.getKey().first;
|
||||
Metapb.Store store = entry.getKey().second;
|
||||
TiStore store = entry.getKey().second;
|
||||
this.appendBatchBySize(batchKeyList, tiRegion, store, entry.getValue(), true, mutations);
|
||||
}
|
||||
|
||||
|
@ -450,7 +450,7 @@ public class TwoPhaseCommitter {
|
|||
private void appendBatchBySize(
|
||||
List<BatchKeys> batchKeyList,
|
||||
TiRegion tiRegion,
|
||||
Metapb.Store store,
|
||||
TiStore store,
|
||||
List<ByteString> keys,
|
||||
boolean sizeIncludeValue,
|
||||
Map<ByteString, Kvrpcpb.Mutation> mutations) {
|
||||
|
@ -571,11 +571,11 @@ public class TwoPhaseCommitter {
|
|||
// groups keys by region
|
||||
GroupKeyResult groupResult = this.groupKeysByRegion(keys, size, backOffer);
|
||||
List<BatchKeys> batchKeyList = new ArrayList<>();
|
||||
Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult();
|
||||
Map<Pair<TiRegion, TiStore>, List<ByteString>> groupKeyMap = groupResult.getGroupsResult();
|
||||
|
||||
for (Map.Entry<Pair<TiRegion, Metapb.Store>, List<ByteString>> entry : groupKeyMap.entrySet()) {
|
||||
for (Map.Entry<Pair<TiRegion, TiStore>, List<ByteString>> entry : groupKeyMap.entrySet()) {
|
||||
TiRegion tiRegion = entry.getKey().first;
|
||||
Metapb.Store store = entry.getKey().second;
|
||||
TiStore store = entry.getKey().second;
|
||||
this.appendBatchBySize(batchKeyList, tiRegion, store, entry.getValue(), false, null);
|
||||
}
|
||||
|
||||
|
@ -615,13 +615,12 @@ public class TwoPhaseCommitter {
|
|||
|
||||
private GroupKeyResult groupKeysByRegion(ByteString[] keys, int size, BackOffer backOffer)
|
||||
throws TiBatchWriteException {
|
||||
Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groups = new HashMap<>();
|
||||
Map<Pair<TiRegion, TiStore>, List<ByteString>> groups = new HashMap<>();
|
||||
int index = 0;
|
||||
try {
|
||||
for (; index < size; index++) {
|
||||
ByteString key = keys[index];
|
||||
Pair<TiRegion, Metapb.Store> pair =
|
||||
this.regionManager.getRegionStorePairByKey(key, backOffer);
|
||||
Pair<TiRegion, TiStore> pair = this.regionManager.getRegionStorePairByKey(key, backOffer);
|
||||
if (pair != null) {
|
||||
groups.computeIfAbsent(pair, e -> new ArrayList<>()).add(key);
|
||||
}
|
||||
|
|
|
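
TwoPhaseCommitter now groups keys by Pair<TiRegion, TiStore> instead of Pair<TiRegion, Metapb.Store>. A minimal sketch of the grouping step, reusing the calls shown in the hunk above (the try/catch that wraps region lookup in the real method is omitted here):

// Fragment of TwoPhaseCommitter: map each key to the (region, store) pair that
// owns it, so prewrite/commit batches can be built per region.
private GroupKeyResult groupKeysByRegion(ByteString[] keys, int size, BackOffer backOffer) {
  Map<Pair<TiRegion, TiStore>, List<ByteString>> groups = new HashMap<>();
  for (int index = 0; index < size; index++) {
    ByteString key = keys[index];
    Pair<TiRegion, TiStore> pair = this.regionManager.getRegionStorePairByKey(key, backOffer);
    if (pair != null) {
      groups.computeIfAbsent(pair, e -> new ArrayList<>()).add(key);
    }
  }
  GroupKeyResult result = new GroupKeyResult();
  result.setGroupsResult(groups);
  return result;
}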
@ -33,11 +33,11 @@ import org.tikv.common.meta.TiTimestamp;
|
|||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.BackOffFunction;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.txn.type.ClientRPCResult;
|
||||
|
||||
/** KV client of transaction APIs for GET/PUT/DELETE/SCAN */
|
||||
|
@ -94,7 +94,7 @@ public class TxnKVClient implements AutoCloseable {
|
|||
long lockTTL,
|
||||
long startTs,
|
||||
TiRegion tiRegion,
|
||||
Metapb.Store store) {
|
||||
TiStore store) {
|
||||
ClientRPCResult result = new ClientRPCResult(true, false, null);
|
||||
// send request
|
||||
RegionStoreClient client = clientBuilder.build(tiRegion, store);
|
||||
|
@ -116,7 +116,7 @@ public class TxnKVClient implements AutoCloseable {
|
|||
long startTs,
|
||||
long ttl,
|
||||
TiRegion tiRegion,
|
||||
Metapb.Store store) {
|
||||
TiStore store) {
|
||||
ClientRPCResult result = new ClientRPCResult(true, false, null);
|
||||
// send request
|
||||
RegionStoreClient client = clientBuilder.build(tiRegion, store);
|
||||
|
@ -148,7 +148,7 @@ public class TxnKVClient implements AutoCloseable {
|
|||
long startTs,
|
||||
long commitTs,
|
||||
TiRegion tiRegion,
|
||||
Metapb.Store store) {
|
||||
TiStore store) {
|
||||
ClientRPCResult result = new ClientRPCResult(true, false, null);
|
||||
// send request
|
||||
RegionStoreClient client = clientBuilder.build(tiRegion, store);
|
||||
|
|
|
@ -19,16 +19,15 @@ import com.google.protobuf.ByteString;
import java.util.ArrayList;
import java.util.List;
import org.tikv.common.region.TiRegion;
import org.tikv.kvproto.Metapb;
import org.tikv.common.region.TiStore;

public class BatchKeys {
  private final TiRegion region;
  private final Metapb.Store store;
  private final TiStore store;
  private List<ByteString> keys;
  private final int sizeInBytes;

  public BatchKeys(
      TiRegion region, Metapb.Store store, List<ByteString> keysInput, int sizeInBytes) {
  public BatchKeys(TiRegion region, TiStore store, List<ByteString> keysInput, int sizeInBytes) {
    this.region = region;
    this.store = store;
    this.keys = new ArrayList<>();

@ -48,7 +47,7 @@ public class BatchKeys {
    return region;
  }

  public Metapb.Store getStore() {
  public TiStore getStore() {
    return store;
  }
@ -20,22 +20,22 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
|
||||
public class GroupKeyResult {
|
||||
|
||||
private Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupsResult;
|
||||
private Map<Pair<TiRegion, TiStore>, List<ByteString>> groupsResult;
|
||||
|
||||
public GroupKeyResult() {
|
||||
this.groupsResult = new HashMap<>();
|
||||
}
|
||||
|
||||
public Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> getGroupsResult() {
|
||||
public Map<Pair<TiRegion, TiStore>, List<ByteString>> getGroupsResult() {
|
||||
return groupsResult;
|
||||
}
|
||||
|
||||
public void setGroupsResult(Map<Pair<TiRegion, Metapb.Store>, List<ByteString>> groupsResult) {
|
||||
public void setGroupsResult(Map<Pair<TiRegion, TiStore>, List<ByteString>> groupsResult) {
|
||||
this.groupsResult = groupsResult;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(":rule.bzl", "junit_suite_test")
|
||||
|
||||
junit_suite_test(
|
||||
name = "tikv-client-java-test",
|
||||
srcs = glob(
|
||||
["**/*.java"],
|
||||
),
|
||||
deps = [
|
||||
"//src/main/java/com/pingcap/tikv:tikv-java-client-lib",
|
||||
"//:java",
|
||||
"//:java_compile_imports",
|
||||
"@com_fasterxml_jackson_core_jackson_annotations//jar",
|
||||
"@com_fasterxml_jackson_core_jackson_core//jar",
|
||||
"@com_fasterxml_jackson_core_jackson_databind//jar",
|
||||
|
||||
"@org_pubref_rules_protobuf//java:grpc_compiletime_deps",
|
||||
"@org_pubref_rules_protobuf//java:netty_runtime_deps",
|
||||
"@net_sf_trove4j_trove4j//jar",
|
||||
"@junit_junit//jar",
|
||||
"@joda_time//jar",
|
||||
],
|
||||
)
|
|
@ -1,10 +1,13 @@
|
|||
package org.tikv.common;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
import org.junit.Before;
|
||||
import org.tikv.common.TiConfiguration.KVMode;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.Pdpb;
|
||||
|
||||
|
@ -27,14 +30,25 @@ public class MockServerTest extends PDMockServerTest {
|
|||
.addPeers(Metapb.Peer.newBuilder().setId(11).setStoreId(13))
|
||||
.build();
|
||||
|
||||
List<Metapb.Store> s =
|
||||
ImmutableList.of(
|
||||
Metapb.Store.newBuilder()
|
||||
.setAddress("localhost:1234")
|
||||
.setVersion("5.0.0")
|
||||
.setId(13)
|
||||
.build());
|
||||
|
||||
region =
|
||||
new TiRegion(
|
||||
session.getConf(),
|
||||
r,
|
||||
r.getPeers(0),
|
||||
session.getConf().getIsolationLevel(),
|
||||
session.getConf().getCommandPriority(),
|
||||
KVMode.TXN);
|
||||
r.getPeersList(),
|
||||
s.stream().map(TiStore::new).collect(Collectors.toList()));
|
||||
pdServer.addGetRegionResp(Pdpb.GetRegionResponse.newBuilder().setRegion(r).build());
|
||||
for (Metapb.Store store : s) {
|
||||
pdServer.addGetStoreResp(Pdpb.GetStoreResponse.newBuilder().setStore(store).build());
|
||||
}
|
||||
server = new KVMockServer();
|
||||
port = server.start(region);
|
||||
}
|
||||
|
|
|
@ -18,15 +18,14 @@ package org.tikv.common;
|
|||
import static org.junit.Assert.*;
|
||||
import static org.tikv.common.GrpcUtils.encodeKey;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.concurrent.*;
|
||||
import org.junit.Test;
|
||||
import org.tikv.common.exception.GrpcException;
|
||||
import org.tikv.common.meta.TiTimestamp;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
import org.tikv.kvproto.Metapb.StoreState;
|
||||
|
@ -37,7 +36,7 @@ public class PDClientTest extends PDMockServerTest {
|
|||
@Test
|
||||
public void testCreate() throws Exception {
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
assertEquals(client.getLeaderWrapper().getLeaderInfo(), LOCAL_ADDR + ":" + pdServer.port);
|
||||
assertEquals(client.getPdClientWrapper().getLeaderInfo(), LOCAL_ADDR + ":" + pdServer.port);
|
||||
assertEquals(client.getHeader().getClusterId(), CLUSTER_ID);
|
||||
}
|
||||
}
|
||||
|
@ -45,17 +44,18 @@ public class PDClientTest extends PDMockServerTest {
|
|||
@Test
|
||||
public void testSwitchLeader() throws Exception {
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
client.switchLeader(ImmutableList.of("http://" + LOCAL_ADDR + ":" + (pdServer.port + 1)));
|
||||
client.trySwitchLeader("http://" + LOCAL_ADDR + ":" + (pdServer.port + 1));
|
||||
assertEquals(
|
||||
client.getLeaderWrapper().getLeaderInfo(), LOCAL_ADDR + ":" + (pdServer.port + 1));
|
||||
"http://" + LOCAL_ADDR + ":" + (pdServer.port + 1),
|
||||
client.getPdClientWrapper().getLeaderInfo());
|
||||
}
|
||||
tearDown();
|
||||
setUp(LOCAL_ADDR_IPV6);
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
client.switchLeader(
|
||||
ImmutableList.of("http://" + LOCAL_ADDR_IPV6 + ":" + (pdServer.port + 2)));
|
||||
client.trySwitchLeader("http://" + LOCAL_ADDR_IPV6 + ":" + (pdServer.port + 2));
|
||||
assertEquals(
|
||||
client.getLeaderWrapper().getLeaderInfo(), LOCAL_ADDR_IPV6 + ":" + (pdServer.port + 2));
|
||||
"http://" + LOCAL_ADDR_IPV6 + ":" + (pdServer.port + 2),
|
||||
client.getPdClientWrapper().getLeaderInfo());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -85,40 +85,16 @@ public class PDClientTest extends PDMockServerTest {
|
|||
GrpcUtils.makePeer(1, 10),
|
||||
GrpcUtils.makePeer(2, 20))));
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
TiRegion r = client.getRegionByKey(defaultBackOff(), ByteString.EMPTY);
|
||||
Pair<Metapb.Region, Metapb.Peer> rl =
|
||||
client.getRegionByKey(defaultBackOff(), ByteString.EMPTY);
|
||||
Metapb.Region r = rl.first;
|
||||
Metapb.Peer l = rl.second;
|
||||
assertEquals(r.getStartKey(), ByteString.copyFrom(startKey));
|
||||
assertEquals(r.getEndKey(), ByteString.copyFrom(endKey));
|
||||
assertEquals(r.getRegionEpoch().getConfVer(), confVer);
|
||||
assertEquals(r.getRegionEpoch().getVersion(), ver);
|
||||
assertEquals(r.getLeader().getId(), 1);
|
||||
assertEquals(r.getLeader().getStoreId(), 10);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetRegionByKeyAsync() throws Exception {
|
||||
byte[] startKey = new byte[] {1, 0, 2, 4};
|
||||
byte[] endKey = new byte[] {1, 0, 2, 5};
|
||||
int confVer = 1026;
|
||||
int ver = 1027;
|
||||
pdServer.addGetRegionResp(
|
||||
GrpcUtils.makeGetRegionResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeRegion(
|
||||
1,
|
||||
encodeKey(startKey),
|
||||
encodeKey(endKey),
|
||||
GrpcUtils.makeRegionEpoch(confVer, ver),
|
||||
GrpcUtils.makePeer(1, 10),
|
||||
GrpcUtils.makePeer(2, 20))));
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
TiRegion r = client.getRegionByKeyAsync(defaultBackOff(), ByteString.EMPTY).get();
|
||||
assertEquals(r.getStartKey(), ByteString.copyFrom(startKey));
|
||||
assertEquals(r.getEndKey(), ByteString.copyFrom(endKey));
|
||||
assertEquals(r.getRegionEpoch().getConfVer(), confVer);
|
||||
assertEquals(r.getRegionEpoch().getVersion(), ver);
|
||||
assertEquals(r.getLeader().getId(), 1);
|
||||
assertEquals(r.getLeader().getStoreId(), 10);
|
||||
assertEquals(l.getId(), 1);
|
||||
assertEquals(l.getStoreId(), 10);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -140,40 +116,15 @@ public class PDClientTest extends PDMockServerTest {
|
|||
GrpcUtils.makePeer(1, 10),
|
||||
GrpcUtils.makePeer(2, 20))));
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
TiRegion r = client.getRegionByID(defaultBackOff(), 0);
|
||||
Pair<Metapb.Region, Metapb.Peer> rl = client.getRegionByID(defaultBackOff(), 0);
|
||||
Metapb.Region r = rl.first;
|
||||
Metapb.Peer l = rl.second;
|
||||
assertEquals(r.getStartKey(), ByteString.copyFrom(startKey));
|
||||
assertEquals(r.getEndKey(), ByteString.copyFrom(endKey));
|
||||
assertEquals(r.getRegionEpoch().getConfVer(), confVer);
|
||||
assertEquals(r.getRegionEpoch().getVersion(), ver);
|
||||
assertEquals(r.getLeader().getId(), 1);
|
||||
assertEquals(r.getLeader().getStoreId(), 10);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetRegionByIdAsync() throws Exception {
|
||||
byte[] startKey = new byte[] {1, 0, 2, 4};
|
||||
byte[] endKey = new byte[] {1, 0, 2, 5};
|
||||
int confVer = 1026;
|
||||
int ver = 1027;
|
||||
pdServer.addGetRegionByIDResp(
|
||||
GrpcUtils.makeGetRegionResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeRegion(
|
||||
1,
|
||||
encodeKey(startKey),
|
||||
encodeKey(endKey),
|
||||
GrpcUtils.makeRegionEpoch(confVer, ver),
|
||||
GrpcUtils.makePeer(1, 10),
|
||||
GrpcUtils.makePeer(2, 20))));
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
TiRegion r = client.getRegionByIDAsync(defaultBackOff(), 0).get();
|
||||
assertEquals(r.getStartKey(), ByteString.copyFrom(startKey));
|
||||
assertEquals(r.getEndKey(), ByteString.copyFrom(endKey));
|
||||
assertEquals(r.getRegionEpoch().getConfVer(), confVer);
|
||||
assertEquals(r.getRegionEpoch().getVersion(), ver);
|
||||
assertEquals(r.getLeader().getId(), 1);
|
||||
assertEquals(r.getLeader().getStoreId(), 10);
|
||||
assertEquals(l.getId(), 1);
|
||||
assertEquals(l.getStoreId(), 10);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -208,38 +159,6 @@ public class PDClientTest extends PDMockServerTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetStoreAsync() throws Exception {
|
||||
long storeId = 1;
|
||||
String testAddress = "testAddress";
|
||||
pdServer.addGetStoreResp(
|
||||
GrpcUtils.makeGetStoreResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeStore(
|
||||
storeId,
|
||||
testAddress,
|
||||
Metapb.StoreState.Up,
|
||||
GrpcUtils.makeStoreLabel("k1", "v1"),
|
||||
GrpcUtils.makeStoreLabel("k2", "v2"))));
|
||||
try (PDClient client = session.getPDClient()) {
|
||||
Store r = client.getStoreAsync(defaultBackOff(), 0).get();
|
||||
assertEquals(r.getId(), storeId);
|
||||
assertEquals(r.getAddress(), testAddress);
|
||||
assertEquals(r.getState(), Metapb.StoreState.Up);
|
||||
assertEquals(r.getLabels(0).getKey(), "k1");
|
||||
assertEquals(r.getLabels(1).getKey(), "k2");
|
||||
assertEquals(r.getLabels(0).getValue(), "v1");
|
||||
assertEquals(r.getLabels(1).getValue(), "v2");
|
||||
|
||||
pdServer.addGetStoreResp(
|
||||
GrpcUtils.makeGetStoreResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeStore(storeId, testAddress, Metapb.StoreState.Tombstone)));
|
||||
assertEquals(
|
||||
StoreState.Tombstone, client.getStoreAsync(defaultBackOff(), 0).get().getState());
|
||||
}
|
||||
}
|
||||
|
||||
private BackOffer defaultBackOff() {
|
||||
return ConcreteBackOffer.newCustomBackOff(1000);
|
||||
}
|
||||
|
|
|
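
The PDClientTest changes track an API change: getRegionByKey and getRegionByID now return a Pair of the raw Metapb.Region and its leader Metapb.Peer instead of a TiRegion. A hedged usage sketch based on the test above:

// Sketch: read a region and its leader from PD; client and defaultBackOff()
// come from the surrounding test fixture.
Pair<Metapb.Region, Metapb.Peer> rl = client.getRegionByKey(defaultBackOff(), ByteString.EMPTY);
Metapb.Region region = rl.first;
Metapb.Peer leader = rl.second;
// The region carries the key range and epoch; the peer identifies the store
// currently leading it.
long leaderStoreId = leader.getStoreId();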
@ -43,7 +43,7 @@ public class PDMockServer extends PDGrpc.PDImplBase {
  @Override
  public void getMembers(GetMembersRequest request, StreamObserver<GetMembersResponse> resp) {
    try {
      resp.onNext(getMembersResp.removeFirst().get());
      resp.onNext(getMembersResp.getFirst().get());
      resp.onCompleted();
    } catch (Exception e) {
      resp.onError(Status.INTERNAL.asRuntimeException());
@ -26,10 +26,10 @@ import org.junit.Test;
|
|||
import org.tikv.common.key.Key;
|
||||
import org.tikv.common.region.RegionManager;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.region.TiStore;
|
||||
import org.tikv.common.util.KeyRangeUtils;
|
||||
import org.tikv.common.util.Pair;
|
||||
import org.tikv.kvproto.Metapb;
|
||||
import org.tikv.kvproto.Metapb.Store;
|
||||
import org.tikv.kvproto.Metapb.StoreState;
|
||||
|
||||
public class RegionManagerTest extends PDMockServerTest {
|
||||
|
@ -61,6 +61,7 @@ public class RegionManagerTest extends PDMockServerTest {
|
|||
int confVer = 1026;
|
||||
int ver = 1027;
|
||||
long regionId = 233;
|
||||
String testAddress = "testAddress";
|
||||
pdServer.addGetRegionResp(
|
||||
GrpcUtils.makeGetRegionResponse(
|
||||
pdServer.getClusterId(),
|
||||
|
@ -71,6 +72,18 @@ public class RegionManagerTest extends PDMockServerTest {
|
|||
GrpcUtils.makeRegionEpoch(confVer, ver),
|
||||
GrpcUtils.makePeer(1, 10),
|
||||
GrpcUtils.makePeer(2, 20))));
|
||||
for (long id : new long[] {10, 20}) {
|
||||
pdServer.addGetStoreResp(
|
||||
GrpcUtils.makeGetStoreResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeStore(
|
||||
id,
|
||||
testAddress,
|
||||
Metapb.StoreState.Up,
|
||||
GrpcUtils.makeStoreLabel("k1", "v1"),
|
||||
GrpcUtils.makeStoreLabel("k2", "v2"))));
|
||||
}
|
||||
|
||||
TiRegion region = mgr.getRegionByKey(startKey);
|
||||
assertEquals(region.getId(), regionId);
|
||||
|
||||
|
@ -106,16 +119,19 @@ public class RegionManagerTest extends PDMockServerTest {
|
|||
GrpcUtils.makeRegionEpoch(confVer, ver),
|
||||
GrpcUtils.makePeer(storeId, 10),
|
||||
GrpcUtils.makePeer(storeId + 1, 20))));
|
||||
pdServer.addGetStoreResp(
|
||||
GrpcUtils.makeGetStoreResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeStore(
|
||||
storeId,
|
||||
testAddress,
|
||||
Metapb.StoreState.Up,
|
||||
GrpcUtils.makeStoreLabel("k1", "v1"),
|
||||
GrpcUtils.makeStoreLabel("k2", "v2"))));
|
||||
Pair<TiRegion, Store> pair = mgr.getRegionStorePairByKey(searchKey);
|
||||
for (long id : new long[] {10, 20}) {
|
||||
pdServer.addGetStoreResp(
|
||||
GrpcUtils.makeGetStoreResponse(
|
||||
pdServer.getClusterId(),
|
||||
GrpcUtils.makeStore(
|
||||
id,
|
||||
testAddress,
|
||||
Metapb.StoreState.Up,
|
||||
GrpcUtils.makeStoreLabel("k1", "v1"),
|
||||
GrpcUtils.makeStoreLabel("k2", "v2"))));
|
||||
}
|
||||
|
||||
Pair<TiRegion, TiStore> pair = mgr.getRegionStorePairByKey(searchKey);
|
||||
assertEquals(pair.first.getId(), regionId);
|
||||
assertEquals(pair.first.getId(), storeId);
|
||||
}
|
||||
|
@ -133,8 +149,8 @@ public class RegionManagerTest extends PDMockServerTest {
|
|||
Metapb.StoreState.Up,
|
||||
GrpcUtils.makeStoreLabel("k1", "v1"),
|
||||
GrpcUtils.makeStoreLabel("k2", "v2"))));
|
||||
Store store = mgr.getStoreById(storeId);
|
||||
assertEquals(store.getId(), storeId);
|
||||
TiStore store = mgr.getStoreById(storeId);
|
||||
assertEquals(store.getStore().getId(), storeId);
|
||||
|
||||
pdServer.addGetStoreResp(
|
||||
GrpcUtils.makeGetStoreResponse(
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.junit.Test;
import org.tikv.common.region.RegionManager;
import org.tikv.common.region.RegionStoreClient;
import org.tikv.common.region.RegionStoreClient.RegionStoreClientBuilder;
import org.tikv.common.region.TiStore;
import org.tikv.common.util.BackOffer;
import org.tikv.common.util.ConcreteBackOffer;
import org.tikv.kvproto.Kvrpcpb;

@ -40,19 +41,20 @@ public class RegionStoreClientTest extends MockServerTest {
  }

  private RegionStoreClient createClient(String version) {
    Metapb.Store store =
    Metapb.Store meta =
        Metapb.Store.newBuilder()
            .setAddress(LOCAL_ADDR + ":" + port)
            .setId(1)
            .setState(Metapb.StoreState.Up)
            .setVersion(version)
            .build();
    TiStore store = new TiStore(meta);

    RegionStoreClientBuilder builder =
        new RegionStoreClientBuilder(
            session.getConf(),
            session.getChannelFactory(),
            new RegionManager(session.getPDClient()),
            new RegionManager(session.getConf(), session.getPDClient()),
            session.getPDClient());

    return builder.build(region, store);
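
Two changes recur throughout these test hunks: raw Metapb.Store values are wrapped in a TiStore before being handed to region-level clients, and RegionManager now takes the TiConfiguration explicitly. A hedged sketch of the construction pattern; address, id and version are placeholder values, while session and region come from the test fixture above:

// Sketch: wrap a Metapb.Store in TiStore and build a RegionStoreClient.
Metapb.Store meta =
    Metapb.Store.newBuilder()
        .setAddress("127.0.0.1:20160")
        .setId(1)
        .setState(Metapb.StoreState.Up)
        .setVersion("5.0.0")
        .build();
TiStore store = new TiStore(meta);

RegionStoreClient.RegionStoreClientBuilder builder =
    new RegionStoreClient.RegionStoreClientBuilder(
        session.getConf(),
        session.getChannelFactory(),
        new RegionManager(session.getConf(), session.getPDClient()),
        session.getPDClient());
RegionStoreClient client = builder.build(region, store);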
@ -0,0 +1,124 @@
|
|||
package org.tikv.common;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.RejectedExecutionException;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.raw.RawKVClient;
|
||||
|
||||
public class TiSessionTest {
|
||||
private TiSession session;
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (session != null) {
|
||||
session.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void closeWithRunningTaskTest() throws Exception {
|
||||
doCloseWithRunningTaskTest(true, 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void closeAwaitTerminationWithRunningTaskTest() throws Exception {
|
||||
doCloseWithRunningTaskTest(false, 10000);
|
||||
}
|
||||
|
||||
private void doCloseWithRunningTaskTest(boolean now, long timeoutMS) throws Exception {
|
||||
TiConfiguration conf = TiConfiguration.createRawDefault();
|
||||
session = TiSession.create(conf);
|
||||
|
||||
ExecutorService executorService = session.getThreadPoolForBatchGet();
|
||||
AtomicReference<InterruptedException> interruptedException = new AtomicReference<>();
|
||||
executorService.submit(
|
||||
() -> {
|
||||
int i = 1;
|
||||
while (true) {
|
||||
i = i + 1;
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException e) {
|
||||
interruptedException.set(e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Thread.sleep(2000);
|
||||
|
||||
long startMS = System.currentTimeMillis();
|
||||
if (now) {
|
||||
session.close();
|
||||
Thread.sleep(1000);
|
||||
assertNotNull(interruptedException.get());
|
||||
assertTrue(System.currentTimeMillis() - startMS < 2000);
|
||||
} else {
|
||||
session.closeAwaitTermination(timeoutMS);
|
||||
assertNotNull(interruptedException.get());
|
||||
assertTrue(System.currentTimeMillis() - startMS >= timeoutMS);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void closeTest() throws Exception {
|
||||
doCloseTest(true, 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void closeAwaitTerminationTest() throws Exception {
|
||||
doCloseTest(false, 10000);
|
||||
}
|
||||
|
||||
private void doCloseTest(boolean now, long timeoutMS) throws Exception {
|
||||
TiConfiguration conf = TiConfiguration.createRawDefault();
|
||||
session = TiSession.create(conf);
|
||||
RawKVClient client = session.createRawClient();
|
||||
|
||||
// test getRegionByKey
|
||||
ByteString key = ByteString.copyFromUtf8("key");
|
||||
ByteString value = ByteString.copyFromUtf8("value");
|
||||
TiRegion region = session.getRegionManager().getRegionByKey(key);
|
||||
assertNotNull(region);
|
||||
|
||||
// test RawKVClient
|
||||
client.put(key, value);
|
||||
List<ByteString> keys = new ArrayList<>();
|
||||
keys.add(key);
|
||||
client.batchGet(keys);
|
||||
|
||||
// close TiSession
|
||||
if (now) {
|
||||
session.close();
|
||||
} else {
|
||||
session.closeAwaitTermination(timeoutMS);
|
||||
}
|
||||
|
||||
// test getRegionByKey
|
||||
try {
|
||||
session.getRegionManager().getRegionByKey(key);
|
||||
fail();
|
||||
} catch (RuntimeException e) {
|
||||
assertEquals("this TiSession is closed!", e.getMessage());
|
||||
}
|
||||
|
||||
// test RawKVClient
|
||||
try {
|
||||
client.batchGet(keys);
|
||||
fail();
|
||||
} catch (RejectedExecutionException e) {
|
||||
assertTrue(e.getMessage().contains("rejected from java.util.concurrent.ThreadPoolExecutor"));
|
||||
}
|
||||
}
|
||||
}
|
|
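
TiSessionTest above covers the two shutdown paths: close() interrupts running tasks right away, while closeAwaitTermination(timeoutMS) waits up to the timeout first; after either call the session rejects new work. A hedged usage sketch:

// Sketch of the shutdown behaviour exercised by the test above; assumes a
// method that declares throws Exception, as the test does.
TiConfiguration conf = TiConfiguration.createRawDefault();
TiSession session = TiSession.create(conf);
RawKVClient client = session.createRawClient();
client.put(ByteString.copyFromUtf8("key"), ByteString.copyFromUtf8("value"));

// Either shut down immediately (in-flight tasks are interrupted) ...
session.close();
// ... or give running tasks up to 10s to finish first:
// session.closeAwaitTermination(10_000);

// Afterwards the session is unusable: getRegionByKey throws
// "this TiSession is closed!" and batch operations are rejected by the
// shut-down thread pools, as the assertions above show.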
@ -0,0 +1,83 @@
|
|||
package org.tikv.raw;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.TiSession;
|
||||
|
||||
public class MetricsTest {
|
||||
private List<TiSession> sessionList = new ArrayList<>();
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
for (TiSession tiSession : sessionList) {
|
||||
if (tiSession != null) {
|
||||
tiSession.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void oneTiSession() throws Exception {
|
||||
TiConfiguration conf = TiConfiguration.createRawDefault();
|
||||
conf.setMetricsEnable(true);
|
||||
TiSession session = TiSession.create(conf);
|
||||
sessionList.add(session);
|
||||
RawKVClient client = session.createRawClient();
|
||||
client.put(ByteString.copyFromUtf8("k"), ByteString.copyFromUtf8("v"));
|
||||
ByteString result = client.get(ByteString.copyFromUtf8("k"));
|
||||
assertEquals(result.toStringUtf8(), "v");
|
||||
client.close();
|
||||
session.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void twoTiSession() throws Exception {
|
||||
TiConfiguration conf = TiConfiguration.createRawDefault();
|
||||
conf.setMetricsEnable(true);
|
||||
|
||||
TiSession session1 = TiSession.create(conf);
|
||||
sessionList.add(session1);
|
||||
RawKVClient client1 = session1.createRawClient();
|
||||
client1.put(ByteString.copyFromUtf8("k1"), ByteString.copyFromUtf8("v1"));
|
||||
|
||||
TiSession session2 = TiSession.create(conf);
|
||||
sessionList.add(session2);
|
||||
RawKVClient client2 = session2.createRawClient();
|
||||
client2.put(ByteString.copyFromUtf8("k2"), ByteString.copyFromUtf8("v2"));
|
||||
|
||||
client1.close();
|
||||
session1.close();
|
||||
|
||||
ByteString result = client2.get(ByteString.copyFromUtf8("k2"));
|
||||
assertEquals(result.toStringUtf8(), "v2");
|
||||
|
||||
client2.close();
|
||||
session2.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void twoTiSessionWithDifferentPort() {
|
||||
TiConfiguration conf1 = TiConfiguration.createRawDefault();
|
||||
conf1.setMetricsEnable(true);
|
||||
conf1.setMetricsPort(12345);
|
||||
TiSession session1 = TiSession.create(conf1);
|
||||
sessionList.add(session1);
|
||||
|
||||
TiConfiguration conf2 = TiConfiguration.createRawDefault();
|
||||
conf2.setMetricsEnable(true);
|
||||
conf2.setMetricsPort(54321);
|
||||
try {
|
||||
TiSession.create(conf2);
|
||||
assertEquals(1, 2);
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertEquals(
|
||||
"Do dot support multiple tikv.metrics.port, which are 54321 and 12345", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
|
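
MetricsTest shows metrics being switched on per TiConfiguration, and that every TiSession in one process must agree on tikv.metrics.port; a second session with a different port fails with IllegalArgumentException. A hedged sketch:

// Sketch of the metrics configuration exercised above; the port is an
// arbitrary example value. Assumes a method that declares throws Exception.
TiConfiguration conf = TiConfiguration.createRawDefault();
conf.setMetricsEnable(true);
conf.setMetricsPort(12345);

TiSession session = TiSession.create(conf);
RawKVClient client = session.createRawClient();
client.put(ByteString.copyFromUtf8("k"), ByteString.copyFromUtf8("v"));
// Any further sessions in this JVM must reuse port 12345; a different
// tikv.metrics.port is rejected, as the test asserts.
client.close();
session.close();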
@ -1,5 +1,7 @@
|
|||
package org.tikv.raw;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
|
@ -8,6 +10,7 @@ import java.util.stream.Collectors;
|
|||
import org.apache.commons.lang3.RandomStringUtils;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -91,7 +94,8 @@ public class RawKVClientTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
// tikv-4.0 does not support atomic api
|
||||
@Ignore
|
||||
public void atomicAPITest() {
|
||||
if (!initialized) return;
|
||||
long ttl = 10;
|
||||
|
@ -100,19 +104,20 @@ public class RawKVClientTest {
|
|||
ByteString value2 = ByteString.copyFromUtf8("value2");
|
||||
client.delete(key);
|
||||
ByteString res1 = client.putIfAbsent(key, value, ttl);
|
||||
assert res1.isEmpty();
|
||||
assertTrue(res1.isEmpty());
|
||||
ByteString res2 = client.putIfAbsent(key, value2, ttl);
|
||||
assert res2.equals(value);
|
||||
assertEquals(value, res2);
|
||||
try {
|
||||
Thread.sleep(ttl * 1000);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
ByteString res3 = client.putIfAbsent(key, value, ttl);
|
||||
assert res3.isEmpty();
|
||||
assertTrue(res3.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
// tikv-4.0 doest not support ttl
|
||||
@Ignore
|
||||
public void getKeyTTLTest() {
|
||||
if (!initialized) return;
|
||||
long ttl = 10;
|
||||
|
@ -254,30 +259,47 @@ public class RawKVClientTest {
|
|||
public void simpleTest() {
|
||||
if (!initialized) return;
|
||||
ByteString key = rawKey("key");
|
||||
ByteString key0 = rawKey("key0");
|
||||
ByteString key1 = rawKey("key1");
|
||||
ByteString key2 = rawKey("key2");
|
||||
ByteString key3 = rawKey("key3");
|
||||
ByteString value = rawValue("value");
|
||||
ByteString value1 = rawValue("value1");
|
||||
ByteString value2 = rawValue("value2");
|
||||
ByteString value3 = rawValue("value3");
|
||||
Kvrpcpb.KvPair kv = Kvrpcpb.KvPair.newBuilder().setKey(key).setValue(value).build();
|
||||
Kvrpcpb.KvPair kv1 = Kvrpcpb.KvPair.newBuilder().setKey(key1).setValue(value1).build();
|
||||
Kvrpcpb.KvPair kv2 = Kvrpcpb.KvPair.newBuilder().setKey(key2).setValue(value2).build();
|
||||
Kvrpcpb.KvPair kv3 = Kvrpcpb.KvPair.newBuilder().setKey(key3).setValue(value3).build();
|
||||
|
||||
try {
|
||||
checkEmpty(key1);
|
||||
checkEmpty(key2);
|
||||
checkPut(key1, value1);
|
||||
checkPut(key2, value2);
|
||||
List<Kvrpcpb.KvPair> result = new ArrayList<>();
|
||||
List<Kvrpcpb.KvPair> result2 = new ArrayList<>();
|
||||
result.add(kv1);
|
||||
result.add(kv2);
|
||||
checkScan(key, key3, result, limit);
|
||||
checkScan(key1, key3, result, limit);
|
||||
checkScan(key, key1, new ArrayList<>(), limit);
|
||||
result2.add(kv1);
|
||||
checkScan(key, key2, result2, limit);
|
||||
checkDeleteRange(ByteString.EMPTY, ByteString.EMPTY);
|
||||
checkEmpty(kv);
|
||||
checkEmpty(kv1);
|
||||
checkEmpty(kv2);
|
||||
checkEmpty(kv3);
|
||||
checkPut(kv);
|
||||
checkPut(kv1);
|
||||
checkPut(kv2);
|
||||
checkPut(kv3);
|
||||
// <key, value>, <key1,value1>, <key2,value2>, <key3,value3>
|
||||
// (-∞, +∞)
|
||||
checkScan(ByteString.EMPTY, ByteString.EMPTY, Arrays.asList(kv, kv1, kv2, kv3), limit);
|
||||
// (-∞, key3)
|
||||
checkScan(ByteString.EMPTY, key3, Arrays.asList(kv, kv1, kv2), limit);
|
||||
// [key1, +∞)
|
||||
checkScan(key1, ByteString.EMPTY, Arrays.asList(kv1, kv2, kv3), limit);
|
||||
// [key, key3)
|
||||
checkScan(key, key3, Arrays.asList(kv, kv1, kv2), limit);
|
||||
// [key1, key3)
|
||||
checkScan(key1, key3, Arrays.asList(kv1, kv2), limit);
|
||||
// [key0, key1)
|
||||
checkScan(key0, key1, new ArrayList<>(), limit);
|
||||
// [key, key2)
|
||||
checkScan(key, key2, Arrays.asList(kv, kv1), limit);
|
||||
checkDelete(key1);
|
||||
checkDelete(key2);
|
||||
checkDeleteRange(ByteString.EMPTY, ByteString.EMPTY);
|
||||
} catch (final TiKVException e) {
|
||||
logger.warn("Test fails with Exception: " + e);
|
||||
}
|
||||
|
@ -509,7 +531,7 @@ public class RawKVClientTest {
|
|||
} else {
|
||||
int i = 0;
|
||||
for (Map.Entry<ByteString, ByteString> pair : data.entrySet()) {
|
||||
assert client.get(pair.getKey()).equals(pair.getValue());
|
||||
assertEquals(pair.getValue(), client.get(pair.getKey()));
|
||||
i++;
|
||||
if (i >= getCases) {
|
||||
break;
|
||||
|
@ -756,27 +778,31 @@ public class RawKVClientTest {
|
|||
private void checkBatchGet(List<ByteString> keys) {
|
||||
List<Kvrpcpb.KvPair> result = client.batchGet(keys);
|
||||
for (Kvrpcpb.KvPair kvPair : result) {
|
||||
assert data.containsKey(kvPair.getKey());
|
||||
assert kvPair.getValue().equals(data.get(kvPair.getKey()));
|
||||
assertTrue(data.containsKey(kvPair.getKey()));
|
||||
assertEquals(data.get(kvPair.getKey()), kvPair.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
private void checkPut(Kvrpcpb.KvPair kv) {
|
||||
checkPut(kv.getKey(), kv.getValue());
|
||||
}
|
||||
|
||||
private void checkPut(ByteString key, ByteString value) {
|
||||
client.put(key, value);
|
||||
assert client.get(key).equals(value);
|
||||
assertEquals(value, client.get(key));
|
||||
}
|
||||
|
||||
private void checkBatchPut(Map<ByteString, ByteString> kvPairs) {
|
||||
client.batchPut(kvPairs);
|
||||
for (Map.Entry<ByteString, ByteString> kvPair : kvPairs.entrySet()) {
|
||||
assert client.get(kvPair.getKey()).equals(kvPair.getValue());
|
||||
assertEquals(kvPair.getValue(), client.get(kvPair.getKey()));
|
||||
}
|
||||
}
|
||||
|
||||
private void checkScan(
|
||||
ByteString startKey, ByteString endKey, List<Kvrpcpb.KvPair> ans, int limit) {
|
||||
ByteString startKey, ByteString endKey, List<Kvrpcpb.KvPair> expected, int limit) {
|
||||
List<Kvrpcpb.KvPair> result = client.scan(startKey, endKey, limit);
|
||||
assert result.equals(ans);
|
||||
assertEquals(expected, result);
|
||||
}
|
||||
|
||||
private void checkScan(
|
||||
|
@ -812,7 +838,7 @@ public class RawKVClientTest {
|
|||
.setValue(kvPair.getValue())
|
||||
.build())
|
||||
.collect(Collectors.toList());
|
||||
assert result.get(i).equals(partialResult);
|
||||
assertEquals(partialResult, result.get(i));
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
@ -827,31 +853,35 @@ public class RawKVClientTest {
|
|||
logger.info("delete range complete");
|
||||
List<Kvrpcpb.KvPair> result = client.scan(startKey, endKey);
|
||||
logger.info("checking scan complete. number of remaining keys in range: " + result.size());
|
||||
assert result.isEmpty();
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
private void checkPutTTL(ByteString key, ByteString value, long ttl) {
|
||||
client.put(key, value, ttl);
|
||||
assert client.get(key).equals(value);
|
||||
assertEquals(value, client.get(key));
|
||||
}
|
||||
|
||||
private void checkGetKeyTTL(ByteString key, long ttl) {
|
||||
Long t = client.getKeyTTL(key);
|
||||
assert t != null;
|
||||
assert t <= ttl && t > 0;
|
||||
assertNotNull(t);
|
||||
assertTrue(t <= ttl && t > 0);
|
||||
}
|
||||
|
||||
private void checkGetTTLTimeOut(ByteString key) {
|
||||
assert client.get(key).isEmpty();
|
||||
assertTrue(client.get(key).isEmpty());
|
||||
}
|
||||
|
||||
private void checkGetKeyTTLTimeOut(ByteString key) {
|
||||
Long t = client.getKeyTTL(key);
|
||||
assert t == null;
|
||||
assertNull(t);
|
||||
}
|
||||
|
||||
private void checkEmpty(Kvrpcpb.KvPair kv) {
|
||||
checkEmpty(kv.getKey());
|
||||
}
|
||||
|
||||
private void checkEmpty(ByteString key) {
|
||||
assert client.get(key).isEmpty();
|
||||
assertTrue(client.get(key).isEmpty());
|
||||
}
|
||||
|
||||
private static ByteString rawKey(String key) {
|
||||
|
|
|
@ -1,40 +0,0 @@
|
|||
def junit_suite_test(name, srcs, deps, size="small", resources=[], classpath_resources=[], jvm_flags=[], tags=[], data=[]):
|
||||
tests = []
|
||||
package = PACKAGE_NAME.replace("src/test/java/", "").replace("/", ".")
|
||||
for src in srcs:
|
||||
if src.endswith("Test.java"):
|
||||
if "/" in src:
|
||||
src = package + "." + src.replace("/", ".")
|
||||
tests += [src.replace(".java", ".class")]
|
||||
|
||||
|
||||
native.genrule(
|
||||
name = name + "-AllTests-gen",
|
||||
outs = ["AllTests.java"],
|
||||
cmd = """
|
||||
cat <<EOF >> $@
|
||||
package %s;
|
||||
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Suite;
|
||||
|
||||
@RunWith(Suite.class)
|
||||
@Suite.SuiteClasses({%s})
|
||||
public class AllTests {}
|
||||
EOF
|
||||
""" % (package, ",".join(tests))
|
||||
)
|
||||
|
||||
native.java_test(
|
||||
name = name,
|
||||
srcs = srcs + ["AllTests.java"],
|
||||
test_class = package + ".AllTests",
|
||||
resources = resources,
|
||||
classpath_resources = classpath_resources,
|
||||
data = data,
|
||||
size = size,
|
||||
tags = tags,
|
||||
jvm_flags = jvm_flags,
|
||||
deps = deps + [
|
||||
],
|
||||
)
|
|
@ -0,0 +1,84 @@
|
|||
package org.tikv.txn;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.TiSession;
|
||||
import org.tikv.common.replica.Region;
|
||||
import org.tikv.common.replica.ReplicaSelector;
|
||||
import org.tikv.common.replica.Store;
|
||||
|
||||
public class ReplicaReadTest extends TXNTest {
|
||||
private TiSession session;
|
||||
private String key;
|
||||
private String value;
|
||||
|
||||
@Test
|
||||
public void leaderReadTest() {
|
||||
doTest(TiConfiguration.ReplicaRead.LEADER);
|
||||
}
|
||||
|
||||
// ci only has one TiKV instance
|
||||
@Ignore
|
||||
public void followerReadTest() {
|
||||
doTest(TiConfiguration.ReplicaRead.FOLLOWER);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void leadAndFollowerReadTest() {
|
||||
doTest(TiConfiguration.ReplicaRead.LEADER_AND_FOLLOWER);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void replicaSelectorTest() {
|
||||
TiConfiguration conf = TiConfiguration.createDefault();
|
||||
|
||||
conf.setReplicaSelector(
|
||||
new ReplicaSelector() {
|
||||
@Override
|
||||
public List<Store> select(Region region) {
|
||||
List<Store> list = new ArrayList<>();
|
||||
for (Store store : region.getStores()) {
|
||||
list.add(store);
|
||||
}
|
||||
return list;
|
||||
}
|
||||
});
|
||||
session = TiSession.create(conf);
|
||||
|
||||
putKV(key, value);
|
||||
ByteString v = session.createSnapshot().get(ByteString.copyFromUtf8(key));
|
||||
Assert.assertEquals(value, v.toStringUtf8());
|
||||
}
|
||||
|
||||
private void doTest(TiConfiguration.ReplicaRead replicaRead) {
|
||||
TiConfiguration conf = TiConfiguration.createDefault();
|
||||
conf.setReplicaRead(replicaRead);
|
||||
session = TiSession.create(conf);
|
||||
|
||||
putKV(key, value);
|
||||
ByteString v = session.createSnapshot().get(ByteString.copyFromUtf8(key));
|
||||
Assert.assertEquals(value, v.toStringUtf8());
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
super.setUp();
|
||||
key = genRandomKey(64);
|
||||
value = "v0";
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (session != null) {
|
||||
session.close();
|
||||
}
|
||||
super.tearDown();
|
||||
}
|
||||
}
|
|
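
ReplicaReadTest configures reads through two new knobs: a ReplicaRead mode (LEADER, FOLLOWER, LEADER_AND_FOLLOWER) and an optional ReplicaSelector that picks candidate stores per region. A hedged configuration sketch mirroring the test above; the key is a placeholder value:

// Sketch: configure replica reads on a TiConfiguration.
TiConfiguration conf = TiConfiguration.createDefault();

// Built-in modes: LEADER, FOLLOWER or LEADER_AND_FOLLOWER.
// conf.setReplicaRead(TiConfiguration.ReplicaRead.LEADER_AND_FOLLOWER);

// Or plug in a custom selector over a region's stores
// (trivial selector here: every replica is a candidate, as in the test).
conf.setReplicaSelector(
    new ReplicaSelector() {
      @Override
      public List<Store> select(Region region) {
        List<Store> list = new ArrayList<>();
        for (Store store : region.getStores()) {
          list.add(store);
        }
        return list;
      }
    });

TiSession session = TiSession.create(conf);
ByteString value = session.createSnapshot().get(ByteString.copyFromUtf8("some-key"));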
@ -0,0 +1,131 @@
|
|||
package org.tikv.txn;
|
||||
|
||||
import static junit.framework.TestCase.assertTrue;
|
||||
import static junit.framework.TestCase.fail;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.stream.Collectors;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.tikv.common.TiConfiguration;
|
||||
import org.tikv.common.TiSession;
|
||||
import org.tikv.common.exception.RegionException;
|
||||
import org.tikv.common.region.RegionStoreClient;
|
||||
import org.tikv.common.region.TiRegion;
|
||||
import org.tikv.common.util.BackOffFunction;
|
||||
import org.tikv.common.util.BackOffer;
|
||||
import org.tikv.common.util.ConcreteBackOffer;
|
||||
import org.tikv.kvproto.Kvrpcpb;
|
||||
|
||||
public class TXNTest {
|
||||
static final int DEFAULT_TTL = 10;
|
||||
private TiSession session;
|
||||
RegionStoreClient.RegionStoreClientBuilder builder;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
TiConfiguration conf = TiConfiguration.createDefault();
|
||||
try {
|
||||
session = TiSession.create(conf);
|
||||
this.builder = session.getRegionStoreClientBuilder();
|
||||
} catch (Exception e) {
|
||||
fail("TiDB cluster may not be present");
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (session != null) {
|
||||
session.close();
|
||||
}
|
||||
}
|
||||
|
||||
void putKV(String key, String value) {
|
||||
long startTS = session.getTimestamp().getVersion();
|
||||
long commitTS = session.getTimestamp().getVersion();
|
||||
putKV(key, value, startTS, commitTS);
|
||||
}
|
||||
|
||||
void putKV(String key, String value, long startTS, long commitTS) {
|
||||
Kvrpcpb.Mutation m =
|
||||
Kvrpcpb.Mutation.newBuilder()
|
||||
.setKey(ByteString.copyFromUtf8(key))
|
||||
.setOp(Kvrpcpb.Op.Put)
|
||||
.setValue(ByteString.copyFromUtf8(value))
|
||||
.build();
|
||||
|
||||
boolean res = prewriteString(Collections.singletonList(m), startTS, key, DEFAULT_TTL);
|
||||
assertTrue(res);
|
||||
res = commitString(Collections.singletonList(key), startTS, commitTS);
|
||||
assertTrue(res);
|
||||
}
|
||||
|
||||
boolean prewriteString(List<Kvrpcpb.Mutation> mutations, long startTS, String primary, long ttl) {
|
||||
return prewrite(mutations, startTS, ByteString.copyFromUtf8(primary), ttl);
|
||||
}
|
||||
|
||||
boolean prewrite(List<Kvrpcpb.Mutation> mutations, long startTS, ByteString primary, long ttl) {
|
||||
if (mutations.size() == 0) return true;
|
||||
BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(1000);
|
||||
|
||||
for (Kvrpcpb.Mutation m : mutations) {
|
||||
while (true) {
|
||||
try {
|
||||
TiRegion region = session.getRegionManager().getRegionByKey(m.getKey());
|
||||
RegionStoreClient client = builder.build(region);
|
||||
client.prewrite(backOffer, primary, Collections.singletonList(m), startTS, ttl, false);
|
||||
break;
|
||||
} catch (RegionException e) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
boolean commitString(List<String> keys, long startTS, long commitTS) {
|
||||
return commit(
|
||||
keys.stream().map(ByteString::copyFromUtf8).collect(Collectors.toList()),
|
||||
startTS,
|
||||
commitTS);
|
||||
}
|
||||
|
||||
boolean commit(List<ByteString> keys, long startTS, long commitTS) {
|
||||
if (keys.size() == 0) return true;
|
||||
BackOffer backOffer = ConcreteBackOffer.newCustomBackOff(1000);
|
||||
|
||||
for (ByteString byteStringK : keys) {
|
||||
while (true) {
|
||||
try {
|
||||
TiRegion tiRegion = session.getRegionManager().getRegionByKey(byteStringK);
|
||||
RegionStoreClient client = builder.build(tiRegion);
|
||||
client.commit(backOffer, Collections.singletonList(byteStringK), startTS, commitTS);
|
||||
break;
|
||||
} catch (RegionException e) {
|
||||
backOffer.doBackOff(BackOffFunction.BackOffFuncType.BoRegionMiss, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
String genRandomKey(int strLength) {
|
||||
Random rnd = ThreadLocalRandom.current();
|
||||
String prefix = rnd.nextInt(2) % 2 == 0 ? "a-test-" : "z-test-";
|
||||
StringBuilder ret = new StringBuilder(prefix);
|
||||
for (int i = 0; i < strLength; i++) {
|
||||
boolean isChar = (rnd.nextInt(2) % 2 == 0);
|
||||
if (isChar) {
|
||||
int choice = rnd.nextInt(2) % 2 == 0 ? 65 : 97;
|
||||
ret.append((char) (choice + rnd.nextInt(26)));
|
||||
} else {
|
||||
ret.append(rnd.nextInt(10));
|
||||
}
|
||||
}
|
||||
return ret.toString();
|
||||
}
|
||||
}
|