Mirror of https://gitee.com/mirrors/Spring-Cloud-Alibaba.git (synced 2021-06-26 13:25:11 +08:00)

Commit dddb952023: Merge branch 'master' of https://github.com/alibaba/spring-cloud-alibaba
@@ -1,6 +1,6 @@
# Spring Cloud Alibaba

[](https://circleci.com/gh/spring-cloud-incubator/spring-cloud-alibaba/tree/master)
[](https://circleci.com/gh/alibaba/spring-cloud-alibaba/tree/master)
[](https://search.maven.org/search?q=g:org.springframework.cloud%20AND%20a:spring-cloud-alibaba-dependencies)
[](https://codecov.io/gh/spring-cloud-incubator/spring-cloud-alibaba)
[](https://www.apache.org/licenses/LICENSE-2.0.html)
@@ -109,7 +109,7 @@ Example list:

[RocketMQ Example](https://github.com/spring-cloud-incubator/spring-cloud-alibaba/blob/master/spring-cloud-alibaba-examples/rocketmq-example/readme-zh.md)

[Fescar Example](https://github.com/spring-cloud-incubator/spring-cloud-alibaba/blob/master/spring-cloud-alibaba-examples/fescar-example/readme-zh.md)
[Seata Example](https://github.com/alibaba/spring-cloud-alibaba/blob/finchley/spring-cloud-alibaba-examples/seata-example/readme-zh.md)

[Alibaba Cloud OSS Example](https://github.com/spring-cloud-incubator/spring-cloud-alibaba/blob/master/spring-cloud-alibaba-examples/oss-example/readme-zh.md)
@@ -1,6 +1,6 @@
# Spring Cloud Alibaba

[](https://circleci.com/gh/spring-cloud-incubator/spring-cloud-alibaba/tree/master)
[](https://circleci.com/gh/alibaba/spring-cloud-alibaba/tree/master)
[](https://search.maven.org/search?q=g:org.springframework.cloud%20AND%20a:spring-cloud-alibaba-dependencies)
[](https://codecov.io/gh/spring-cloud-incubator/spring-cloud-alibaba)
[](https://www.apache.org/licenses/LICENSE-2.0.html)
@@ -25,7 +25,7 @@ Apache RocketMQ™ is a Java-based, high-performance, high-throughput distributed messaging and

Apache Dubbo™ is a high-performance Java RPC framework.

**Fescar**
**Seata**

An open source Alibaba product: an easy-to-use, high-performance distributed transaction solution for microservices.
@@ -28,7 +28,7 @@ Apache RocketMQ™ is an open source distributed messaging and streaming data pl

Apache Dubbo™ is a high-performance, Java based open source RPC framework.

**Fescar**
**Seata**

A distributed transaction solution with high performance and ease of use for microservices architecture.
@@ -20,7 +20,7 @@
<properties>
    <sentinel.version>1.6.2</sentinel.version>
    <oss.version>3.1.0</oss.version>
    <seata.version>0.5.2</seata.version>
    <seata.version>0.7.1</seata.version>
    <nacos.client.version>1.1.1</nacos.client.version>
    <nacos.config.version>0.8.0</nacos.config.version>
    <acm.version>1.0.9</acm.version>
@@ -210,7 +210,7 @@
<dependency>
    <groupId>io.seata</groupId>
    <artifactId>seata-spring</artifactId>
    <artifactId>seata-all</artifactId>
    <version>${seata.version}</version>
</dependency>
@@ -20,6 +20,10 @@
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
</dependencies>

<build>
@@ -2,3 +2,4 @@ spring.application.name=acm-local
server.port=18089
spring.cloud.alicloud.acm.server-list=127.0.0.1
spring.cloud.alicloud.acm.server-port=8080
management.endpoints.web.exposure.include=*
@@ -19,16 +19,12 @@ transport {
  #auto default pin or 8
  worker-thread-size = 8
  }
}
store {
  # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
  max-branch-session-size = 16384
  # globe session size , if exceeded throws exceptions
  max-global-session-size = 512
  # file buffer size , if exceeded allocate new buffer
  file-write-buffer-cache-size = 16384
  # when recover batch read size
  session.reload.read_size = 100
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }
  serialization = "seata"
  compressor = "none"
}
service {
  #vgroup->rgroup
@@ -39,6 +35,9 @@ service {
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}
client {
  async.commit.buffer.limit = 10000
@@ -46,4 +45,18 @@ client {
  retry.internal = 10
  retry.times = 30
  }
  report.retry.count = 5
}
transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
}

## metrics settings
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}
@@ -1,5 +1,5 @@
registry {
  # file 、nacos 、eureka、redis、zk、consul
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "file"

  nacos {
@@ -8,7 +8,7 @@ registry {
    cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:1001/eureka"
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
@@ -26,13 +26,26 @@ registry {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
@@ -40,8 +53,11 @@ config {
    namespace = "public"
    cluster = "default"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "fescar-server"
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
  }
  zk {
@@ -49,6 +65,9 @@ config {
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
@@ -60,7 +60,7 @@ public class HomeController {
    }

    @GlobalTransactional(timeoutMills = 300000, name = "spring-cloud-demo-tx")
    @RequestMapping(value = "/fescar/rest", method = RequestMethod.GET, produces = "application/json")
    @RequestMapping(value = "/seata/rest", method = RequestMethod.GET, produces = "application/json")
    public String rest() {

        String result = restTemplate.getForObject(
@@ -96,7 +96,7 @@ public class HomeController {
    }

    @GlobalTransactional(timeoutMills = 300000, name = "spring-cloud-demo-tx")
    @RequestMapping(value = "/fescar/feign", method = RequestMethod.GET, produces = "application/json")
    @RequestMapping(value = "/seata/feign", method = RequestMethod.GET, produces = "application/json")
    public String feign() {

        String result = storageService.storage(COMMODITY_CODE, ORDER_COUNT);
@@ -5,4 +5,4 @@ spring.application.name=business-service
#feign.hystrix.enabled=true
#feign.sentinel.enabled=true

logging.level.com.alibaba.fescar=debug
logging.level.io.seata=debug
@@ -19,36 +19,12 @@ transport {
  #auto default pin or 8
  worker-thread-size = 8
  }
}
## transaction log store
store {
  ## store mode: file、db
  mode = "file"

  ## file store
  file {
    dir = "sessionStore"

    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size , if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size , if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
    # async, sync
    flush-disk-mode = async
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }

  ## database store
  db {
    driver_class = ""
    url = ""
    user = ""
    password = ""
  }

  serialization = "seata"
  compressor = "none"
}
service {
  #vgroup->rgroup
@@ -59,6 +35,9 @@ service {
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}
client {
  async.commit.buffer.limit = 10000
@@ -66,4 +45,18 @@ client {
  retry.internal = 10
  retry.times = 30
  }
  report.retry.count = 5
}
transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
}

## metrics settings
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}
@@ -8,7 +8,7 @@ registry {
  cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:1001/eureka"
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
@@ -45,7 +45,7 @@ registry {
}

config {
  # file、nacos 、apollo、zk
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
@@ -53,6 +53,9 @@ config {
    namespace = "public"
    cluster = "default"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
@@ -62,6 +65,9 @@ config {
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
@@ -19,36 +19,12 @@ transport {
  #auto default pin or 8
  worker-thread-size = 8
  }
}
## transaction log store
store {
  ## store mode: file、db
  mode = "file"

  ## file store
  file {
    dir = "sessionStore"

    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size , if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size , if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
    # async, sync
    flush-disk-mode = async
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }

  ## database store
  db {
    driver_class = ""
    url = ""
    user = ""
    password = ""
  }

  serialization = "seata"
  compressor = "none"
}
service {
  #vgroup->rgroup
@@ -59,6 +35,9 @@ service {
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}
client {
  async.commit.buffer.limit = 10000
@@ -66,4 +45,18 @@ client {
  retry.internal = 10
  retry.times = 30
  }
  report.retry.count = 5
}
transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
}

## metrics settings
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}
@@ -8,7 +8,7 @@ registry {
  cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:1001/eureka"
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
@@ -45,7 +45,7 @@ registry {
}

config {
  # file、nacos 、apollo、zk
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
@@ -53,6 +53,9 @@ config {
    namespace = "public"
    cluster = "default"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
@@ -62,6 +65,9 @@ config {
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
@@ -50,6 +50,7 @@ CREATE TABLE `undo_log` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT,
  `branch_id` bigint(20) NOT NULL,
  `xid` varchar(100) NOT NULL,
  `context` varchar(128) NOT NULL,
  `rollback_info` longblob NOT NULL,
  `log_status` int(11) NOT NULL,
  `log_created` datetime NOT NULL,
|
||||
进入解压之后的 bin 目录,执行如下命令来启动
|
||||
|
||||
```$shell
|
||||
sh seata-server.sh $LISTEN_PORT $MODE(file or db)
|
||||
sh seata-server.sh -p $LISTEN_PORT -m $MODE(file or db)
|
||||
```
|
||||
|
||||
在这个示例中,采用如下命令来启动 Seata Server
|
||||
|
||||
```$shell
|
||||
sh seata-server.sh 8091 file
|
||||
sh seata-server.sh -p 8091 -m file
|
||||
```
|
||||
|
||||
**注意** 如果你修改了endpoint且注册中心使用默认file类型,那么记得需要在各个示例工程中的 `file.conf` 文件中,修改 grouplist 的值(当registry.conf 中registry.type 或 config.type 为file 时会读取内部的file节点中的文件名,若type不为file将直接从配置类型的对应元数据的注册配置中心读取数据),推荐大家使用 nacos 作为配置注册中心。
|
||||
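For reference, the grouplist mentioned in the note lives in the service block of each example's `file.conf`. The snippet below is only an illustrative sketch, not part of this commit; the transaction group name `my_test_tx_group` and the cluster name `default` are assumptions, so match them to your own configuration:

```
service {
  # assumed transaction group name, mapped to a cluster ("default" here)
  vgroup_mapping.my_test_tx_group = "default"
  # point this at the address the Seata Server was started on (-p 8091 above)
  default.grouplist = "127.0.0.1:8091"
}
```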
@@ -120,16 +121,16 @@ sh seata-server.sh 8091 file
After the examples are started, you can verify the scenarios where `business-service` calls the other services via RestTemplate and via FeignClient by sending HTTP GET requests to the following two URLs.

```$xslt
http://127.0.0.1:18081/fescar/feign
http://127.0.0.1:18081/seata/feign

http://127.0.0.1:18081/fescar/rest
http://127.0.0.1:18081/seata/rest
```

## How do I verify that the distributed transaction works?

### Is the Xid propagated correctly?

In the Controllers of the `account-server`, `order-service` and `storage-service` services, the first thing each request does is print the Xid from RootContext. If all of them print a correct Xid, that is, the Xid changes on every call and all services within one call share the same Xid, then Fescar's Xid propagation and restoration is working.
In the Controllers of the `account-server`, `order-service` and `storage-service` services, the first thing each request does is print the Xid from RootContext. If all of them print a correct Xid, that is, the Xid changes on every call and all services within one call share the same Xid, then Seata's Xid propagation and restoration is working.
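A minimal sketch of the kind of Xid check described above; this controller is illustrative only (it is not code from this commit) and assumes the `seata-all` dependency declared earlier, which provides `io.seata.core.context.RootContext`:

```java
import io.seata.core.context.RootContext;

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class XidProbeController {

    // Prints the global transaction id propagated by Seata. Within one global
    // transaction, every participating service should print the same value.
    @GetMapping("/xid")
    public String currentXid() {
        String xid = RootContext.getXID();
        System.out.println("current xid: " + xid);
        return xid == null ? "no global transaction" : xid;
    }
}
```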
### Is the data in the database consistent?
@@ -19,36 +19,12 @@ transport {
  #auto default pin or 8
  worker-thread-size = 8
  }
}
## transaction log store
store {
  ## store mode: file、db
  mode = "file"

  ## file store
  file {
    dir = "sessionStore"

    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size , if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size , if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
    # async, sync
    flush-disk-mode = async
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }

  ## database store
  db {
    driver_class = ""
    url = ""
    user = ""
    password = ""
  }

  serialization = "seata"
  compressor = "none"
}
service {
  #vgroup->rgroup
@@ -59,6 +35,9 @@ service {
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}
client {
  async.commit.buffer.limit = 10000
@@ -66,4 +45,18 @@ client {
  retry.internal = 10
  retry.times = 30
  }
  report.retry.count = 5
}
transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
}

## metrics settings
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}
@@ -8,7 +8,7 @@ registry {
  cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:1001/eureka"
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
@@ -45,7 +45,7 @@ registry {
}

config {
  # file、nacos 、apollo、zk
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  nacos {
@@ -53,6 +53,9 @@ config {
    namespace = "public"
    cluster = "default"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
@@ -62,6 +65,9 @@ config {
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
@@ -66,7 +66,7 @@ The provider defines the dubbo-related configuration in the application.properties file, for example
The FooService service is defined in the `sentinel-dubbo-api` module as follows:

package FooService;
package com.alibaba.cloud.examples;
public interface FooService {
    String hello(String name);
}
@@ -93,7 +93,7 @@ The consumer defines the flow control rules before invoking the service.
The FooService service is defined in the `sentinel-dubbo-api` module as follows:

package FooService;
package com.alibaba.cloud.examples;
public interface FooService {
    String hello(String name);
}
@@ -64,7 +64,7 @@ Define some configs of dubbo in `application.properties` in provider side, like
`sentinel-dubbo-api` defines a service named FooService:

package FooService;
package com.alibaba.cloud.examples;
public interface FooService {
    String hello(String name);
}
@@ -91,7 +91,7 @@ We will configure flow control rules before service invocation in consumer side.
`sentinel-dubbo-api` defines a service named FooService:

package FooService;
package com.alibaba.cloud.examples;
public interface FooService {
    String hello(String name);
}
@@ -44,6 +44,12 @@
    <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>

<dependency>
    <groupId>javax.servlet</groupId>
    <artifactId>javax.servlet-api</artifactId>
    <scope>provided</scope>
</dependency>

</dependencies>

<build>
@@ -155,6 +155,21 @@ public class NacosDiscoveryProperties {
     */
    private String secretKey;

    /**
     * Heart beat interval. Time unit: second.
     */
    private Integer heartBeatInterval;

    /**
     * Heart beat timeout. Time unit: second.
     */
    private Integer heartBeatTimeout;

    /**
     * Ip delete timeout. Time unit: second.
     */
    private Integer ipDeleteTimeout;

    @Autowired
    private InetUtils inetUtils;
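The new fields bind to `spring.cloud.nacos.discovery.*` properties; the test further down in this commit sets heart-beat-interval, heart-beat-timeout and ip-delete-timeout. A hedged application.properties sketch, with values chosen purely for illustration:

```
# all values in seconds; the numbers here are only an example
spring.cloud.nacos.discovery.heart-beat-interval=5
spring.cloud.nacos.discovery.heart-beat-timeout=15
spring.cloud.nacos.discovery.ip-delete-timeout=30
```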
@@ -339,6 +354,30 @@ public class NacosDiscoveryProperties {
        this.secretKey = secretKey;
    }

    public Integer getHeartBeatInterval() {
        return heartBeatInterval;
    }

    public void setHeartBeatInterval(Integer heartBeatInterval) {
        this.heartBeatInterval = heartBeatInterval;
    }

    public Integer getHeartBeatTimeout() {
        return heartBeatTimeout;
    }

    public void setHeartBeatTimeout(Integer heartBeatTimeout) {
        this.heartBeatTimeout = heartBeatTimeout;
    }

    public Integer getIpDeleteTimeout() {
        return ipDeleteTimeout;
    }

    public void setIpDeleteTimeout(Integer ipDeleteTimeout) {
        this.ipDeleteTimeout = ipDeleteTimeout;
    }

    public String getNamingLoadCacheAtStart() {
        return namingLoadCacheAtStart;
    }
@@ -31,6 +31,7 @@ import org.springframework.util.StringUtils;

import com.alibaba.cloud.nacos.NacosDiscoveryProperties;
import com.alibaba.nacos.api.naming.NamingService;
import com.alibaba.nacos.api.naming.PreservedMetadataKeys;

/**
 * @author xiaojing
@@ -76,6 +77,19 @@ public class NacosRegistration implements Registration, ServiceInstance {
                metadata.put(MANAGEMENT_ADDRESS, address);
            }
        }

        if (null != nacosDiscoveryProperties.getHeartBeatInterval()) {
            metadata.put(PreservedMetadataKeys.HEART_BEAT_INTERVAL,
                    nacosDiscoveryProperties.getHeartBeatInterval().toString());
        }
        if (null != nacosDiscoveryProperties.getHeartBeatTimeout()) {
            metadata.put(PreservedMetadataKeys.HEART_BEAT_TIMEOUT,
                    nacosDiscoveryProperties.getHeartBeatTimeout().toString());
        }
        if (null != nacosDiscoveryProperties.getIpDeleteTimeout()) {
            metadata.put(PreservedMetadataKeys.IP_DELETE_TIMEOUT,
                    nacosDiscoveryProperties.getIpDeleteTimeout().toString());
        }
    }

    @Override
@@ -56,7 +56,10 @@ import com.alibaba.cloud.nacos.endpoint.NacosDiscoveryEndpoint;
        "spring.cloud.nacos.discovery.namingLoadCacheAtStart=true",
        "spring.cloud.nacos.discovery.secure=true",
        "spring.cloud.nacos.discovery.accessKey=test-accessKey",
        "spring.cloud.nacos.discovery.secretKey=test-secretKey" }, webEnvironment = RANDOM_PORT)
        "spring.cloud.nacos.discovery.secretKey=test-secretKey",
        "spring.cloud.nacos.discovery.heart-beat-interval=3",
        "spring.cloud.nacos.discovery.heart-beat-timeout=6",
        "spring.cloud.nacos.discovery.ip-delete-timeout=9", }, webEnvironment = RANDOM_PORT)
public class NacosAutoServiceRegistrationTests {

    @Autowired
@@ -92,6 +95,9 @@ public class NacosAutoServiceRegistrationTests {
        checkoutNacosDiscoverySecure();
        checkoutNacosDiscoveryAccessKey();
        checkoutNacosDiscoverySecrectKey();
        checkoutNacosDiscoveryHeartBeatInterval();
        checkoutNacosDiscoveryHeartBeatTimeout();
        checkoutNacosDiscoveryIpDeleteTimeout();

        checkoutNacosDiscoveryServiceName();
        checkoutNacosDiscoveryServiceIP();
@@ -162,6 +168,21 @@ public class NacosAutoServiceRegistrationTests {
                properties.getSecretKey());
    }

    private void checkoutNacosDiscoveryHeartBeatInterval() {
        assertEquals("NacosDiscoveryProperties heart beat interval was wrong",
                Integer.valueOf(3), properties.getHeartBeatInterval());
    }

    private void checkoutNacosDiscoveryHeartBeatTimeout() {
        assertEquals("NacosDiscoveryProperties heart beat timeout was wrong",
                Integer.valueOf(6), properties.getHeartBeatTimeout());
    }

    private void checkoutNacosDiscoveryIpDeleteTimeout() {
        assertEquals("NacosDiscoveryProperties ip delete timeout was wrong",
                Integer.valueOf(9), properties.getIpDeleteTimeout());
    }

    private void checkoutNacosDiscoveryServiceName() {
        assertEquals("NacosDiscoveryProperties service name was wrong", "myTestService1",
                properties.getService());
@@ -16,7 +16,7 @@
<dependency>
    <groupId>io.seata</groupId>
    <artifactId>seata-spring</artifactId>
    <artifactId>seata-all</artifactId>
</dependency>

<dependency>
@@ -12,7 +12,7 @@ import org.springframework.context.annotation.Configuration;
@Configuration
@EnableConfigurationProperties(SmsProperties.class)
@ConditionalOnClass(name = "com.aliyuncs.dysmsapi.model.v20170525.SendSmsRequest")
@ConditionalOnProperty(value = "spring.cloud.alibaba.deshao.enable.sms", matchIfMissing = true)
@ConditionalOnProperty(name = "spring.cloud.alicloud.sms.enabled", matchIfMissing = true)
public class SmsContextAutoConfiguration {

}
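Because matchIfMissing is true, the renamed auto-configuration stays active unless the property is set to false explicitly. A purely illustrative application.properties line for opting out:

```
# disable the SMS auto-configuration; the property name comes from the annotation above
spring.cloud.alicloud.sms.enabled=false
```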
@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.cloud.bus.rocketmq.env;
package com.alibaba.cloud.bus.rocketmq.env;

import static org.springframework.cloud.bus.SpringCloudBusClient.INPUT;
@@ -1,3 +1,3 @@
# EnvironmentPostProcessor
org.springframework.boot.env.EnvironmentPostProcessor=\
org.springframework.cloud.bus.rocketmq.env.RocketMQBusEnvironmentPostProcessor
com.alibaba.cloud.bus.rocketmq.env.RocketMQBusEnvironmentPostProcessor