mirror of https://gitee.com/mirrors/Spring-Cloud-Alibaba.git synced 2021-06-26 13:25:11 +08:00

sync to finchley before graduation

This commit is contained in:
fangjian0423
2019-07-27 18:16:32 +08:00
parent c459080f05
commit 301d133238
33 changed files with 671 additions and 155 deletions

View File

@@ -20,6 +20,10 @@
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
</dependencies>
<build>

View File

@@ -1,4 +1,5 @@
spring.application.name=acm-local
server.port=18089
spring.cloud.alicloud.acm.server-list=127.0.0.1
spring.cloud.alicloud.acm.server-port=8080
management.endpoints.web.exposure.include=*
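
With every web endpoint exposed via `management.endpoints.web.exposure.include=*`, the ACM example can be smoke-tested against the standard Spring Boot 2 actuator endpoints. A minimal hedged sketch (class name hypothetical; the port comes from the properties above):

```java
import org.springframework.web.client.RestTemplate;

// Hypothetical smoke test: with all actuator endpoints exposed over HTTP,
// /actuator/health should answer on the application port (18089 above).
public class ActuatorSmokeTest {
    public static void main(String[] args) {
        RestTemplate rest = new RestTemplate();
        String health = rest.getForObject(
                "http://localhost:18089/actuator/health", String.class);
        System.out.println(health); // e.g. {"status":"UP"}
    }
}
```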

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
n=1
while [ $n -le 10 ]
do
echo "$(curl -s 'http://localhost:18083/divide-feign2?a=1')"
let n++
done

View File

@@ -49,6 +49,10 @@ public class ConsumerApplication {
@RequestMapping(value = "/divide", method = RequestMethod.GET)
String divide(@RequestParam("a") Integer a, @RequestParam("b") Integer b);
default String divide(Integer a) {
return divide(a, 0);
}
@RequestMapping(value = "/notFound", method = RequestMethod.GET)
String notFound();
}
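
For context, the `EchoService` Feign client above is declared on an interface; default methods such as `divide(Integer a)` are executed locally by the Feign proxy and simply delegate to the remote `/divide` call with `b` pinned to 0, which is what the shell loop above exercises. A hedged sketch of how such a client is typically wired with a Sentinel fallback (service name and fallback class are assumptions; this assumes `feign.sentinel.enabled=true` and the fallback registered as a bean):

```java
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;

// Hypothetical sketch: Sentinel's Feign integration routes failed or
// blocked calls to the fallback class below.
@FeignClient(name = "service-provider", fallback = EchoServiceFallback.class)
interface EchoService {

    @RequestMapping(value = "/divide", method = RequestMethod.GET)
    String divide(@RequestParam("a") Integer a, @RequestParam("b") Integer b);

    // Default methods are invoked locally by the proxy; this one forces a
    // division by zero on the provider, which triggers the fallback.
    default String divide(Integer a) {
        return divide(a, 0);
    }
}

// Invoked instead of the remote call when it fails or is blocked.
class EchoServiceFallback implements EchoService {
    @Override
    public String divide(Integer a, Integer b) {
        return "divide fallback";
    }
}
```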

View File

@@ -75,6 +75,11 @@ public class TestController {
return echoService.divide(a, b);
}
@RequestMapping(value = "/divide-feign2", method = RequestMethod.GET)
public String divide(@RequestParam Integer a) {
return echoService.divide(a);
}
@RequestMapping(value = "/echo-feign/{str}", method = RequestMethod.GET)
public String feign(@PathVariable String str) {
return echoService.echo(str);

View File

@@ -19,16 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
store {
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
shutdown {
# when destroy server, wait seconds
wait = 3
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@@ -39,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@@ -46,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

View File

@@ -1,5 +1,5 @@
registry {
- # file 、nacos 、eureka、redis、zk、consul
+ # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "file"
nacos {
@@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@@ -26,13 +26,26 @@ registry {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
- # file、nacos 、apollo、zk
+ # file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@@ -40,8 +53,11 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "fescar-server"
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
@@ -49,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

View File

@@ -19,36 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
shutdown {
# when destroy server, wait seconds
wait = 3
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@@ -59,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@@ -66,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

View File

@@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@@ -45,7 +45,7 @@ registry {
}
config {
- # file、nacos 、apollo、zk
+ # file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@@ -53,6 +53,9 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
@@ -62,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

View File

@@ -19,36 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
shutdown {
# when destroy server, wait seconds
wait = 3
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@@ -59,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@@ -66,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

View File

@@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@@ -45,7 +45,7 @@ registry {
}
config {
- # file、nacos 、apollo、zk
+ # file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@@ -53,6 +53,9 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
@@ -62,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

View File

@@ -50,6 +50,7 @@ CREATE TABLE `undo_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`branch_id` bigint(20) NOT NULL,
`xid` varchar(100) NOT NULL,
`context` varchar(128) NOT NULL,
`rollback_info` longblob NOT NULL,
`log_status` int(11) NOT NULL,
`log_created` datetime NOT NULL,
@@ -101,13 +102,13 @@ CREATE TABLE `account_tbl` (
Go to the extracted bin directory and run the following command to start the server:
```$shell
- sh seata-server.sh $LISTEN_PORT $MODE(file or db)
+ sh seata-server.sh -p $LISTEN_PORT -m $MODE(file or db)
```
In this example, the Seata server is started with the following command:
```$shell
- sh seata-server.sh 8091 file
+ sh seata-server.sh -p 8091 -m file
```
**Note:** If you changed the endpoint and the registry center uses the default `file` type, remember to update the `grouplist` value in the `file.conf` file of each example project. (When `registry.type` or `config.type` in `registry.conf` is `file`, the file name configured in the inner `file` node is read; if the type is not `file`, the data is read directly from the corresponding registry/configuration center.) We recommend using Nacos as the configuration and registry center.
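
For context, what ultimately exercises the `undo_log` table created above is a Seata AT-mode global transaction. A hedged sketch of the business entry point (class, method, and transaction name are hypothetical):

```java
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

// Hypothetical sketch: @GlobalTransactional opens a Seata global transaction;
// each participating service registers a branch and writes its before/after
// images to undo_log, which Seata replays on rollback.
@Service
public class BusinessService {

    @GlobalTransactional(name = "spring-cloud-demo-tx", timeoutMills = 60000)
    public void purchase(String userId, String commodityCode, int orderCount) {
        // call storage-service and order-service here; any exception makes
        // Seata roll back every branch via undo_log
    }
}
```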

View File

@@ -19,36 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
shutdown {
# when destroy server, wait seconds
wait = 3
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@@ -59,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@@ -66,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

View File

@@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@@ -45,7 +45,7 @@ registry {
}
config {
- # file、nacos 、apollo、zk
+ # file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@@ -53,6 +53,9 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
@@ -62,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

View File

@@ -43,6 +43,10 @@
<!--<groupId>com.alibaba.csp</groupId>-->
<!--<artifactId>sentinel-datasource-apollo</artifactId>-->
<!--</dependency>-->
<dependency>
<groupId>com.alibaba.csp</groupId>
<artifactId>sentinel-datasource-redis</artifactId>
</dependency>
<!-- define in spring-boot-autoconfigure module -->
<!--<dependency>-->
<!--<groupId>com.fasterxml.jackson.dataformat</groupId>-->

View File

@@ -214,9 +214,9 @@ spring.cloud.sentinel.datasource.ds2.nacos.data-type=json
`ds1` and `ds2` are the names of the ReadableDataSources and can be chosen freely. The `file` and `nacos` after `ds1` and `ds2` indicate the type of the ReadableDataSource.
- Currently 4 types are supported: `file`, `nacos`, `zk`, and `apollo`.
+ Currently 5 types are supported: `file`, `nacos`, `zk`, `apollo`, and `redis`.
- To use the `nacos`, `zk`, or `apollo` types, add the corresponding `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper`, or `sentinel-datasource-apollo` dependency.
+ To use the `nacos`, `zk`, `apollo`, or `redis` types, add the corresponding `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper`, `sentinel-datasource-apollo`, or `sentinel-datasource-redis` dependency.
When a ReadableDataSource loads rule data successfully, the console prints the corresponding log messages:

View File

@@ -189,9 +189,9 @@ spring.cloud.sentinel.datasource.ds2.nacos.data-type=json
`ds1` and `ds2` are the names of the ReadableDataSources; you can name them whatever you want. The `file` and `nacos` after the names `ds1` and `ds2` indicate the type of the ReadableDataSource.
- Now the ReadableDataSource type supports 4 categories: `file`, `nacos`, `zk` and `apollo`.
+ Now the ReadableDataSource type supports 5 categories: `file`, `nacos`, `zk`, `apollo` and `redis`.
- If you want to use the `nacos`, `zk` or `apollo` ReadableDataSource, you could add the `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper` or `sentinel-datasource-apollo` dependency.
+ If you want to use the `nacos`, `zk`, `apollo` or `redis` ReadableDataSource, you could add the `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper`, `sentinel-datasource-apollo` or `sentinel-datasource-redis` dependency.
When a ReadableDataSource loads rule data successfully, the console will print some logs:
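
For the `redis` type, rules can also be registered programmatically with the `sentinel-datasource-redis` module. A hedged sketch (the key and channel names are assumptions; treat the exact constructor signature as indicative of Sentinel 1.x):

```java
import java.util.List;

import com.alibaba.csp.sentinel.datasource.Converter;
import com.alibaba.csp.sentinel.datasource.redis.RedisDataSource;
import com.alibaba.csp.sentinel.datasource.redis.config.RedisConnectionConfig;
import com.alibaba.csp.sentinel.slots.block.flow.FlowRule;
import com.alibaba.csp.sentinel.slots.block.flow.FlowRuleManager;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.TypeReference;

public class RedisFlowRuleSource {
    public static void main(String[] args) {
        RedisConnectionConfig config = RedisConnectionConfig.builder()
                .withHost("localhost")
                .withPort(6379)
                .build();
        // Rules are read once from the ruleKey and then refreshed via
        // pub/sub messages published on the channel.
        Converter<String, List<FlowRule>> parser = source ->
                JSON.parseObject(source, new TypeReference<List<FlowRule>>() {});
        RedisDataSource<List<FlowRule>> ds = new RedisDataSource<>(
                config, "sentinel.flow.rules", "sentinel.flow.channel", parser);
        FlowRuleManager.register2Property(ds.getProperty());
    }
}
```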