mirror of https://gitee.com/mirrors/Spring-Cloud-Alibaba.git synced 2021-06-26 13:25:11 +08:00

sync to finchley before graduation

fangjian0423 2019-07-27 18:16:32 +08:00
parent c459080f05
commit 301d133238
33 changed files with 671 additions and 155 deletions

@ -1,6 +1,6 @@
# Spring Cloud Alibaba
[![CircleCI](https://circleci.com/gh/spring-cloud-incubator/spring-cloud-alibaba/tree/master.svg?style=svg)](https://circleci.com/gh/spring-cloud-incubator/spring-cloud-alibaba/tree/master)
[![CircleCI](https://circleci.com/gh/alibaba/spring-cloud-alibaba/tree/master.svg?style=svg)](https://circleci.com/gh/alibaba/spring-cloud-alibaba/tree/master)
[![Maven Central](https://img.shields.io/maven-central/v/org.springframework.cloud/spring-cloud-alibaba-dependencies.svg?label=Maven%20Central)](https://search.maven.org/search?q=g:org.springframework.cloud%20AND%20a:spring-cloud-alibaba-dependencies)
[![Codecov](https://codecov.io/gh/spring-cloud-incubator/spring-cloud-alibaba/branch/master/graph/badge.svg)](https://codecov.io/gh/spring-cloud-incubator/spring-cloud-alibaba)
[![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html)
@ -138,4 +138,4 @@ spring-cloud-alibaba@googlegroups.com You are welcome to use this mailing list to discuss anything related to spr
### DingTalk Group
![DingQR](https://cdn.nlark.com/lark/0/2018/png/54319/1544667717958-b3022f21-3357-4270-836d-4064e7ac728c.png)
![DingQR](https://img.alicdn.com/tfs/TB1fr81ba61gK0jSZFlXXXDKFXa-892-1213.jpg)

@ -1,6 +1,6 @@
# Spring Cloud Alibaba
[![CircleCI](https://circleci.com/gh/spring-cloud-incubator/spring-cloud-alibaba/tree/master.svg?style=svg)](https://circleci.com/gh/spring-cloud-incubator/spring-cloud-alibaba/tree/master)
[![CircleCI](https://circleci.com/gh/alibaba/spring-cloud-alibaba/tree/master.svg?style=svg)](https://circleci.com/gh/alibaba/spring-cloud-alibaba/tree/master)
[![Maven Central](https://img.shields.io/maven-central/v/org.springframework.cloud/spring-cloud-alibaba-dependencies.svg?label=Maven%20Central)](https://search.maven.org/search?q=g:org.springframework.cloud%20AND%20a:spring-cloud-alibaba-dependencies)
[![Codecov](https://codecov.io/gh/spring-cloud-incubator/spring-cloud-alibaba/branch/master/graph/badge.svg)](https://codecov.io/gh/spring-cloud-incubator/spring-cloud-alibaba)
[![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html)

@ -20,7 +20,7 @@
<properties>
<sentinel.version>1.6.2</sentinel.version>
<oss.version>3.1.0</oss.version>
<seata.version>0.5.1</seata.version>
<seata.version>0.7.1</seata.version>
<nacos.client.version>1.1.1</nacos.client.version>
<nacos.config.version>0.8.0</nacos.config.version>
<acm.version>1.0.9</acm.version>
@ -143,6 +143,11 @@
<artifactId>sentinel-datasource-nacos</artifactId>
<version>${sentinel.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba.csp</groupId>
<artifactId>sentinel-datasource-redis</artifactId>
<version>${sentinel.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba.csp</groupId>
<artifactId>sentinel-web-servlet</artifactId>
@ -211,7 +216,7 @@
<dependency>
<groupId>io.seata</groupId>
<artifactId>seata-spring</artifactId>
<artifactId>seata-all</artifactId>
<version>${seata.version}</version>
</dependency>

@ -20,6 +20,10 @@
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
</dependencies>
<build>

@ -1,4 +1,5 @@
spring.application.name=acm-local
server.port=18089
spring.cloud.alicloud.acm.server-list=127.0.0.1
spring.cloud.alicloud.acm.server-port=8080
spring.cloud.alicloud.acm.server-port=8080
management.endpoints.web.exposure.include=*

@ -0,0 +1,7 @@
#!/usr/bin/env bash
n=1
while [ $n -le 10 ]
do
echo `curl -s http://localhost:18083/divide-feign2?a=1`
let n++
done

@ -49,6 +49,10 @@ public class ConsumerApplication {
@RequestMapping(value = "/divide", method = RequestMethod.GET)
String divide(@RequestParam("a") Integer a, @RequestParam("b") Integer b);
default String divide(Integer a) {
return divide(a, 0);
}
@RequestMapping(value = "/notFound", method = RequestMethod.GET)
String notFound();
}

@ -75,6 +75,11 @@ public class TestController {
return echoService.divide(a, b);
}
@RequestMapping(value = "/divide-feign2", method = RequestMethod.GET)
public String divide(@RequestParam Integer a) {
return echoService.divide(a);
}
@RequestMapping(value = "/echo-feign/{str}", method = RequestMethod.GET)
public String feign(@PathVariable String str) {
return echoService.echo(str);

@ -19,16 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
store {
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
shutdown {
# when destroy server, wait seconds
wait = 3
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@ -39,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@ -46,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

@ -1,5 +1,5 @@
registry {
# file 、nacos 、eureka、redis、zk、consul
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "file"
nacos {
@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@ -26,13 +26,26 @@ registry {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk
# file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@ -40,8 +53,11 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "fescar-server"
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
@ -49,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

@ -19,36 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
shutdown {
# when destroy server, wait seconds
wait = 3
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@ -59,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@ -66,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@ -45,7 +45,7 @@ registry {
}
config {
# file、nacos 、apollo、zk
# file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@ -53,6 +53,9 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
@ -62,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

@ -19,36 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
shutdown {
# when destroy server, wait seconds
wait = 3
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@ -59,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@ -66,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@ -45,7 +45,7 @@ registry {
}
config {
# file、nacos 、apollo、zk
# file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@ -53,6 +53,9 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
@ -62,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

@ -50,6 +50,7 @@ CREATE TABLE `undo_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`branch_id` bigint(20) NOT NULL,
`xid` varchar(100) NOT NULL,
`context` varchar(128) NOT NULL,
`rollback_info` longblob NOT NULL,
`log_status` int(11) NOT NULL,
`log_created` datetime NOT NULL,
@ -101,13 +102,13 @@ CREATE TABLE `account_tbl` (
Go to the bin directory of the extracted package and execute the following command to start the server
```$shell
sh seata-server.sh $LISTEN_PORT $MODE(file or db)
sh seata-server.sh -p $LISTEN_PORT -m $MODE(file or db)
```
In this example, the Seata Server is started with the following command
```$shell
sh seata-server.sh 8091 file
sh seata-server.sh -p 8091 -m file
```
**Note** If you changed the endpoint and the registry center uses the default `file` type, remember to update the `grouplist` value in the `file.conf` file of each example project (when `registry.type` or `config.type` in `registry.conf` is `file`, the file name in its inner `file` node is read; if the type is not `file`, the data is read directly from the registry or configuration center of the configured type). We recommend using Nacos as the configuration and registry center.
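Once the server is running, a participant service only needs `seata-all` on the classpath and an annotated entry method. The sketch below is illustrative rather than taken from the example projects: `StorageService` and `OrderService` are hypothetical beans whose data sources contain the `undo_log` table created above.
```java
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

// Hypothetical downstream beans; in the samples these would call
// storage-service and order-service over Feign or RestTemplate.
interface StorageService { void deduct(String commodityCode, int count); }
interface OrderService { void create(String userId, String commodityCode, int count); }

@Service
public class BusinessService {

    private final OrderService orderService;
    private final StorageService storageService;

    public BusinessService(OrderService orderService, StorageService storageService) {
        this.orderService = orderService;
        this.storageService = storageService;
    }

    // Starts a global transaction against the Seata Server started above; if any
    // branch fails, Seata rolls back using the row images recorded in undo_log.
    @GlobalTransactional(timeoutMills = 60000, name = "purchase-tx")
    public void purchase(String userId, String commodityCode, int count) {
        storageService.deduct(commodityCode, count);
        orderService.create(userId, commodityCode, count);
    }
}
```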

@ -19,36 +19,12 @@ transport {
#auto default pin or 8
worker-thread-size = 8
}
}
## transaction log store
store {
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
shutdown {
# when destroy server, wait seconds
wait = 3
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
serialization = "seata"
compressor = "none"
}
service {
#vgroup->rgroup
@ -59,6 +35,9 @@ service {
enableDegrade = false
#disable
disable = false
#unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
max.commit.retry.timeout = "-1"
max.rollback.retry.timeout = "-1"
}
client {
async.commit.buffer.limit = 10000
@ -66,4 +45,18 @@ client {
retry.internal = 10
retry.times = 30
}
report.retry.count = 5
}
transaction {
undo.data.validation = true
undo.log.serialization = "jackson"
}
## metrics settings
metrics {
enabled = false
registry-type = "compact"
# multi exporters use comma divided
exporter-list = "prometheus"
exporter-prometheus-port = 9898
}

@ -8,7 +8,7 @@ registry {
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:1001/eureka"
serviceUrl = "http://localhost:8761/eureka"
application = "default"
weight = "1"
}
@ -45,7 +45,7 @@ registry {
}
config {
# file、nacos 、apollo、zk
# file、nacos 、apollo、zk、consul、etcd3
type = "file"
nacos {
@ -53,6 +53,9 @@ config {
namespace = "public"
cluster = "default"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
@ -62,7 +65,10 @@ config {
session.timeout = 6000
connect.timeout = 2000
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}
}

@ -43,6 +43,10 @@
<!--<groupId>com.alibaba.csp</groupId>-->
<!--<artifactId>sentinel-datasource-apollo</artifactId>-->
<!--</dependency>-->
<dependency>
<groupId>com.alibaba.csp</groupId>
<artifactId>sentinel-datasource-redis</artifactId>
</dependency>
<!-- define in spring-boot-autoconfigure module -->
<!--<dependency>-->
<!--<groupId>com.fasterxml.jackson.dataformat</groupId>-->

@ -214,9 +214,9 @@ spring.cloud.sentinel.datasource.ds2.nacos.data-type=json
`ds1` and `ds2` are the names of the ReadableDataSources and can be chosen freely. The `file` and `nacos` after `ds1` and `ds2` indicate the type of the ReadableDataSource.
Currently 4 types are supported: `file`, `nacos`, `zk` and `apollo`.
Currently 5 types are supported: `file`, `nacos`, `zk`, `apollo` and `redis`.
The `nacos`, `zk` and `apollo` types require the corresponding dependencies `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper` and `sentinel-datasource-apollo`.
The `nacos`, `zk`, `apollo` and `redis` types require the corresponding dependencies `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper`, `sentinel-datasource-apollo` and `sentinel-datasource-redis`.
When a ReadableDataSource loads rule data successfully, the console prints the corresponding log messages:

@ -189,9 +189,9 @@ spring.cloud.sentinel.datasource.ds2.nacos.data-type=json
`ds1` and `ds2` are the names of the ReadableDataSources; you can name them whatever you want. The `file` and `nacos` after `ds1` and `ds2` indicate the type of the ReadableDataSource.
ReadableDataSource currently supports 4 types: `file`, `nacos`, `zk` and `apollo`.
ReadableDataSource currently supports 5 types: `file`, `nacos`, `zk`, `apollo` and `redis`.
If you want to use the `nacos`, `zk` or `apollo` ReadableDataSource, add the corresponding `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper` or `sentinel-datasource-apollo` dependency.
If you want to use the `nacos`, `zk`, `apollo` or `redis` ReadableDataSource, add the corresponding `sentinel-datasource-nacos`, `sentinel-datasource-zookeeper`, `sentinel-datasource-apollo` or `sentinel-datasource-redis` dependency.
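For reference, the `redis` type ultimately builds a `RedisDataSource`, as the `RedisDataSourceFactoryBean` added in this commit shows. Below is a minimal programmatic sketch of the same wiring, assuming `sentinel-datasource-redis` and Jackson are available and that flow rules are stored as a JSON array under a key and update channel of your choosing:
```java
import java.io.IOException;
import java.util.List;

import com.alibaba.csp.sentinel.datasource.Converter;
import com.alibaba.csp.sentinel.datasource.redis.RedisDataSource;
import com.alibaba.csp.sentinel.datasource.redis.config.RedisConnectionConfig;
import com.alibaba.csp.sentinel.slots.block.flow.FlowRule;
import com.alibaba.csp.sentinel.slots.block.flow.FlowRuleManager;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class RedisFlowRuleBootstrap {

    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();

        // Parse the JSON array stored in Redis into Sentinel flow rules.
        Converter<String, List<FlowRule>> converter = source -> {
            try {
                return mapper.readValue(source, new TypeReference<List<FlowRule>>() {
                });
            }
            catch (IOException e) {
                throw new IllegalStateException("invalid flow rule json", e);
            }
        };

        RedisConnectionConfig config = RedisConnectionConfig.builder()
                .withHost("localhost").withPort(6379).withDatabase(0).build();

        // The rule key holds the initial rules; the channel pushes later updates.
        RedisDataSource<List<FlowRule>> dataSource = new RedisDataSource<>(config,
                "sentinel-flow-rules", "sentinel-flow-channel", converter);
        FlowRuleManager.register2Property(dataSource.getProperty());
    }
}
```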
When a ReadableDataSource loads rule data successfully, the console will print some logs:

@ -155,6 +155,21 @@ public class NacosDiscoveryProperties {
*/
private String secretKey;
/**
* Heart beat interval. Time unit: second.
*/
private Integer heartBeatInterval;
/**
* Heart beat timeout. Time unit: second.
*/
private Integer heartBeatTimeout;
/**
* Ip delete timeout. Time unit: second.
*/
private Integer ipDeleteTimeout;
@Autowired
private InetUtils inetUtils;
@ -339,6 +354,30 @@ public class NacosDiscoveryProperties {
this.secretKey = secretKey;
}
public Integer getHeartBeatInterval() {
return heartBeatInterval;
}
public void setHeartBeatInterval(Integer heartBeatInterval) {
this.heartBeatInterval = heartBeatInterval;
}
public Integer getHeartBeatTimeout() {
return heartBeatTimeout;
}
public void setHeartBeatTimeout(Integer heartBeatTimeout) {
this.heartBeatTimeout = heartBeatTimeout;
}
public Integer getIpDeleteTimeout() {
return ipDeleteTimeout;
}
public void setIpDeleteTimeout(Integer ipDeleteTimeout) {
this.ipDeleteTimeout = ipDeleteTimeout;
}
public String getNamingLoadCacheAtStart() {
return namingLoadCacheAtStart;
}

@ -31,6 +31,7 @@ import org.springframework.util.StringUtils;
import com.alibaba.cloud.nacos.NacosDiscoveryProperties;
import com.alibaba.nacos.api.naming.NamingService;
import com.alibaba.nacos.api.naming.PreservedMetadataKeys;
/**
* @author xiaojing
@ -76,6 +77,19 @@ public class NacosRegistration implements Registration, ServiceInstance {
metadata.put(MANAGEMENT_ADDRESS, address);
}
}
if (null != nacosDiscoveryProperties.getHeartBeatInterval()) {
metadata.put(PreservedMetadataKeys.HEART_BEAT_INTERVAL,
nacosDiscoveryProperties.getHeartBeatInterval().toString());
}
if (null != nacosDiscoveryProperties.getHeartBeatTimeout()) {
metadata.put(PreservedMetadataKeys.HEART_BEAT_TIMEOUT,
nacosDiscoveryProperties.getHeartBeatTimeout().toString());
}
if (null != nacosDiscoveryProperties.getIpDeleteTimeout()) {
metadata.put(PreservedMetadataKeys.IP_DELETE_TIMEOUT,
nacosDiscoveryProperties.getIpDeleteTimeout().toString());
}
}
@Override

@ -16,7 +16,7 @@
<dependency>
<groupId>io.seata</groupId>
<artifactId>seata-spring</artifactId>
<artifactId>seata-all</artifactId>
</dependency>
<dependency>

@ -53,6 +53,13 @@
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.alibaba.csp</groupId>
<artifactId>sentinel-datasource-redis</artifactId>
<scope>provided</scope>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>

@ -33,6 +33,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
* @see ApolloDataSourceProperties
* @see ZookeeperDataSourceProperties
* @see FileDataSourceProperties
* @see RedisDataSourceProperties
*/
public class DataSourcePropertiesConfiguration {
@ -44,6 +45,8 @@ public class DataSourcePropertiesConfiguration {
private ApolloDataSourceProperties apollo;
private RedisDataSourceProperties redis;
public DataSourcePropertiesConfiguration() {
}
@ -63,6 +66,10 @@ public class DataSourcePropertiesConfiguration {
this.apollo = apollo;
}
public DataSourcePropertiesConfiguration(RedisDataSourceProperties redis) {
this.redis = redis;
}
public FileDataSourceProperties getFile() {
return file;
}
@ -95,6 +102,14 @@ public class DataSourcePropertiesConfiguration {
this.apollo = apollo;
}
public RedisDataSourceProperties getRedis() {
return redis;
}
public void setRedis(RedisDataSourceProperties redis) {
this.redis = redis;
}
@JsonIgnore
public List<String> getValidField() {
return Arrays.stream(this.getClass().getDeclaredFields()).map(field -> {

@ -0,0 +1,173 @@
/*
* Copyright (C) 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.cloud.sentinel.datasource.config;
import java.time.Duration;
import java.util.List;
import org.springframework.util.StringUtils;
import com.alibaba.cloud.sentinel.datasource.factorybean.RedisDataSourceFactoryBean;
/**
* Redis datasource properties, used by {@link DataSourcePropertiesConfiguration} and
* {@link RedisDataSourceFactoryBean}
*
* @author <a href="mailto:wangiegie@gmail.com">lengleng</a>
*/
public class RedisDataSourceProperties extends AbstractDataSourceProperties {
public RedisDataSourceProperties() {
super(RedisDataSourceFactoryBean.class.getName());
}
/**
* redis server host
*/
private String host = "localhost";
/**
* redis server port
*/
private int port = 6379;
/**
* redis server password
*/
private String password;
/**
* redis server default select database
*/
private int database;
/**
* redis server timeout
*/
private Duration timeout;
/**
* Comma-separated list of "host:port" pairs.
*/
private List<String> nodes;
/**
* data key in Redis
*/
private String ruleKey;
/**
* channel to subscribe in Redis
*/
private String channel;
/**
* redis sentinel model
*/
private String masterId;
@Override
public void preCheck(String dataSourceName) {
super.preCheck(dataSourceName);
if (StringUtils.isEmpty(ruleKey)) {
throw new IllegalArgumentException(
"RedisDataSource ruleKey can not be empty");
}
if (StringUtils.isEmpty(channel)) {
throw new IllegalArgumentException(
"RedisDataSource channel can not be empty");
}
if (nodes != null && !nodes.isEmpty() && StringUtils.isEmpty(masterId)) {
throw new IllegalArgumentException(
"RedisDataSource sentinel mode, masterId can not be empty");
}
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public String getRuleKey() {
return ruleKey;
}
public void setRuleKey(String ruleKey) {
this.ruleKey = ruleKey;
}
public String getChannel() {
return channel;
}
public void setChannel(String channel) {
this.channel = channel;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public int getDatabase() {
return database;
}
public void setDatabase(int database) {
this.database = database;
}
public Duration getTimeout() {
return timeout;
}
public void setTimeout(Duration timeout) {
this.timeout = timeout;
}
public List<String> getNodes() {
return nodes;
}
public void setNodes(List<String> nodes) {
this.nodes = nodes;
}
public String getMasterId() {
return masterId;
}
public void setMasterId(String masterId) {
this.masterId = masterId;
}
}

@ -0,0 +1,189 @@
/*
* Copyright (C) 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.cloud.sentinel.datasource.factorybean;
import java.time.Duration;
import java.util.List;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import com.alibaba.csp.sentinel.datasource.Converter;
import com.alibaba.csp.sentinel.datasource.redis.RedisDataSource;
import com.alibaba.csp.sentinel.datasource.redis.config.RedisConnectionConfig;
import com.alibaba.csp.sentinel.slots.block.flow.FlowRule;
/**
* A {@link FactoryBean} for creating {@link RedisDataSource} instance.
*
* @author <a href="mailto:wangiegie@gmail.com">lengleng</a>
* @see RedisDataSource
*/
public class RedisDataSourceFactoryBean implements FactoryBean<RedisDataSource> {
private String host;
private int port;
private int database;
private Duration timeout;
/**
* Comma-separated list of "host:port" pairs.
*/
private List<String> nodes;
private Converter converter;
/**
* data key in Redis
*/
private String ruleKey;
/**
* channel to subscribe in Redis
*/
private String channel;
/**
* redis server password
*/
private String password;
private String masterId;
@Override
public RedisDataSource getObject() {
RedisConnectionConfig.Builder builder = RedisConnectionConfig.builder();
if (nodes == null || nodes.isEmpty()) {
builder.withHost(host).withPort(port).withDatabase(database);
}
else {
nodes.forEach(node -> {
try {
String[] parts = StringUtils.split(node, ":");
Assert.state(parts.length == 2, "Must be defined as 'host:port'");
builder.withRedisSentinel(parts[0], Integer.parseInt(parts[1]));
}
catch (RuntimeException ex) {
throw new IllegalStateException(
"Invalid redis sentinel property " + node, ex);
}
});
builder.withSentinelMasterId(masterId);
}
if (timeout != null) {
builder.withTimeout(timeout.toMillis());
}
if (StringUtils.hasText(password)) {
builder.withPassword(password);
}
return new RedisDataSource<List<FlowRule>>(builder.build(), ruleKey, channel,
converter);
}
@Override
public Class<?> getObjectType() {
return RedisDataSource.class;
}
public Converter getConverter() {
return converter;
}
public void setConverter(Converter converter) {
this.converter = converter;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public String getRuleKey() {
return ruleKey;
}
public void setRuleKey(String ruleKey) {
this.ruleKey = ruleKey;
}
public String getChannel() {
return channel;
}
public void setChannel(String channel) {
this.channel = channel;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public int getDatabase() {
return database;
}
public void setDatabase(int database) {
this.database = database;
}
public Duration getTimeout() {
return timeout;
}
public void setTimeout(Duration timeout) {
this.timeout = timeout;
}
public List<String> getNodes() {
return nodes;
}
public void setNodes(List<String> nodes) {
this.nodes = nodes;
}
public String getMasterId() {
return masterId;
}
public void setMasterId(String masterId) {
this.masterId = masterId;
}
}

@ -1,4 +1,5 @@
nacos=com.alibaba.csp.sentinel.datasource.nacos.NacosDataSource
file=com.alibaba.csp.sentinel.datasource.FileRefreshableDataSource
apollo=com.alibaba.csp.sentinel.datasource.apollo.ApolloDataSource
zk=com.alibaba.csp.sentinel.datasource.zookeeper.ZookeeperDataSource
zk=com.alibaba.csp.sentinel.datasource.zookeeper.ZookeeperDataSource
redis=com.alibaba.csp.sentinel.datasource.redis.RedisDataSource

@ -93,45 +93,51 @@ public class SentinelInvocationHandler implements InvocationHandler {
MethodMetadata methodMetadata = SentinelContractHolder.metadataMap
.get(hardCodedTarget.type().getName()
+ Feign.configKey(hardCodedTarget.type(), method));
// resource default is HttpMethod:protocol://url
String resourceName = methodMetadata.template().method().toUpperCase() + ":"
+ hardCodedTarget.url() + methodMetadata.template().url();
Entry entry = null;
try {
ContextUtil.enter(resourceName);
entry = SphU.entry(resourceName, EntryType.OUT, 1, args);
if (methodMetadata == null) {
result = methodHandler.invoke(args);
}
catch (Throwable ex) {
// fallback handle
if (!BlockException.isBlockException(ex)) {
Tracer.trace(ex);
else {
// resource default is HttpMethod:protocol://url
String resourceName = methodMetadata.template().method().toUpperCase()
+ ":" + hardCodedTarget.url() + methodMetadata.template().url();
Entry entry = null;
try {
ContextUtil.enter(resourceName);
entry = SphU.entry(resourceName, EntryType.OUT, 1, args);
result = methodHandler.invoke(args);
}
if (fallbackFactory != null) {
try {
Object fallbackResult = fallbackMethodMap.get(method)
.invoke(fallbackFactory.create(ex), args);
return fallbackResult;
catch (Throwable ex) {
// fallback handle
if (!BlockException.isBlockException(ex)) {
Tracer.trace(ex);
}
catch (IllegalAccessException e) {
// shouldn't happen as method is public due to being an interface
throw new AssertionError(e);
if (fallbackFactory != null) {
try {
Object fallbackResult = fallbackMethodMap.get(method)
.invoke(fallbackFactory.create(ex), args);
return fallbackResult;
}
catch (IllegalAccessException e) {
// shouldn't happen as method is public due to being an
// interface
throw new AssertionError(e);
}
catch (InvocationTargetException e) {
throw new AssertionError(e.getCause());
}
}
catch (InvocationTargetException e) {
throw new AssertionError(e.getCause());
else {
// throw exception if fallbackFactory is null
throw ex;
}
}
else {
// throw exception if fallbackFactory is null
throw ex;
finally {
if (entry != null) {
entry.exit(1, args);
}
ContextUtil.exit();
}
}
finally {
if (entry != null) {
entry.exit(1, args);
}
ContextUtil.exit();
}
}
else {
// other target type using default strategy
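For context on the restructuring above: default methods on a Feign interface (such as `divide(Integer a)` added earlier in this commit) are never parsed into `SentinelContractHolder.metadataMap`, so no resource name can be built for them; the handler now simply delegates the call instead of failing. The following is a rough, simplified sketch of the resulting dispatch logic (fallback handling omitted), not the handler's actual code:
```java
import com.alibaba.csp.sentinel.Entry;
import com.alibaba.csp.sentinel.EntryType;
import com.alibaba.csp.sentinel.SphU;
import com.alibaba.csp.sentinel.context.ContextUtil;

import feign.InvocationHandlerFactory.MethodHandler;
import feign.MethodMetadata;

// Simplified illustration of the two invocation paths after this change.
class SentinelDispatchSketch {

    Object invoke(MethodHandler methodHandler, MethodMetadata methodMetadata,
            String resourceName, Object[] args) throws Throwable {
        if (methodMetadata == null) {
            // Default method on the Feign interface: it was never parsed by the
            // contract, so there is no resource to protect -> plain local call.
            return methodHandler.invoke(args);
        }
        Entry entry = null;
        try {
            ContextUtil.enter(resourceName);
            entry = SphU.entry(resourceName, EntryType.OUT, 1, args);
            return methodHandler.invoke(args);
        }
        finally {
            if (entry != null) {
                entry.exit(1, args);
            }
            ContextUtil.exit();
        }
    }
}
```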

@ -18,6 +18,7 @@ package com.alibaba.alicloud.acm;
import org.springframework.beans.BeansException;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cloud.context.refresh.ContextRefresher;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
@ -37,6 +38,7 @@ import com.taobao.diamond.client.Diamond;
*/
@Configuration
@ConditionalOnClass({ Diamond.class })
@ConditionalOnProperty(name = "spring.cloud.alicloud.acm.enabled", matchIfMissing = true)
public class AcmAutoConfiguration implements ApplicationContextAware {
private ApplicationContext applicationContext;

@ -17,6 +17,7 @@
package com.alibaba.alicloud.acm.bootstrap;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cloud.bootstrap.config.PropertySourceLocator;
import org.springframework.core.env.CompositePropertySource;
import org.springframework.core.env.Environment;
@ -28,6 +29,7 @@ import com.alibaba.alicloud.context.acm.AcmIntegrationProperties;
* @author juven.xuxb
* @author xiaolongzuo
*/
@ConditionalOnProperty(name = "spring.cloud.alicloud.acm.enabled", matchIfMissing = true)
public class AcmPropertySourceLocator implements PropertySourceLocator {
private static final String DIAMOND_PROPERTY_SOURCE_NAME = "diamond";

@ -20,6 +20,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.autoconfigure.endpoint.condition.ConditionalOnEnabledEndpoint;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.condition.ConditionalOnWebApplication;
import org.springframework.context.annotation.Bean;
@ -32,6 +33,7 @@ import com.alibaba.alicloud.context.acm.AcmProperties;
*/
@ConditionalOnWebApplication
@ConditionalOnClass(name = "org.springframework.boot.actuate.autoconfigure.web.ManagementContextConfiguration")
@ConditionalOnProperty(name = "spring.cloud.alicloud.acm.enabled", matchIfMissing = true)
public class AcmEndpointAutoConfiguration {
@Autowired

@ -4,6 +4,12 @@
"name": "spring.application.group",
"type": "java.lang.String",
"description": "spring application group."
},
{
"name": "spring.cloud.alicloud.acm.enabled",
"type": "java.lang.Boolean",
"defaultValue": true,
"description": "enable acm or not."
}
]
}