mirror of https://gitee.com/mirrors/Spring-Cloud-Alibaba.git (synced 2021-06-26 13:25:11 +08:00)

commit 8ddc5ee9a4 (parent c908e8c1cc)

    sync code from master
@@ -19,21 +19,21 @@
 <properties>
     <sentinel.version>1.6.1</sentinel.version>
     <oss.version>3.1.0</oss.version>
-    <fescar.version>0.4.2</fescar.version>
+    <seata.version>0.5.1</seata.version>
     <nacos.client.version>1.0.0</nacos.client.version>
     <nacos.config.version>0.8.0</nacos.config.version>
-    <acm.version>1.0.8</acm.version>
+    <acm.version>1.0.9</acm.version>
     <ans.version>1.0.1</ans.version>
-    <aliyun.sdk.version>4.0.1</aliyun.sdk.version>
+    <aliyun.sdk.version>4.4.1</aliyun.sdk.version>
     <alicloud.context.version>1.0.5</alicloud.context.version>
-    <aliyun.sdk.edas.version>2.16.0</aliyun.sdk.edas.version>
+    <aliyun.sdk.edas.version>2.44.0</aliyun.sdk.edas.version>
     <rocketmq.starter.version>2.0.2</rocketmq.starter.version>
     <schedulerX.client.version>2.1.6</schedulerX.client.version>
     <dubbo.version>2.7.1</dubbo.version>
     <dubbo-spring-boot.version>2.7.1</dubbo-spring-boot.version>
-    <dubbo-registry-nacos.version>0.0.2</dubbo-registry-nacos.version>
+    <dubbo-registry-nacos.version>2.7.1</dubbo-registry-nacos.version>
     <aliyun.java.sdk.dysmsapi>1.1.0</aliyun.java.sdk.dysmsapi>
-    <aliyun.sdk.mns>1.1.8</aliyun.sdk.mns>
+    <aliyun.sdk.mns>1.1.8.6</aliyun.sdk.mns>
     <aliyun.java.sdk.dyvmsapi>1.1.1</aliyun.java.sdk.dyvmsapi>
 </properties>
@@ -205,12 +205,13 @@
         </dependency>

-        <!--Alibaba Fescar-->
+        <!--Alibaba Seata-->
         <dependency>
-            <groupId>com.alibaba.fescar</groupId>
-            <artifactId>fescar-spring</artifactId>
-            <version>${fescar.version}</version>
+            <groupId>io.seata</groupId>
+            <artifactId>seata-spring</artifactId>
+            <version>${seata.version}</version>
         </dependency>

         <!-- Dubbo -->
@@ -243,7 +244,7 @@

         <!-- Dubbo Nacos registry dependency -->
         <dependency>
-            <groupId>com.alibaba</groupId>
+            <groupId>org.apache.dubbo</groupId>
             <artifactId>dubbo-registry-nacos</artifactId>
             <version>${dubbo-registry-nacos.version}</version>
         </dependency>
@@ -17,7 +17,7 @@ package org.springframework.cloud.alibaba.cloud.examples;

 import java.util.Random;

-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -16,17 +16,16 @@
 package org.springframework.cloud.alibaba.cloud.examples;

 import java.sql.SQLException;
 import java.util.Random;

+import com.alibaba.druid.pool.DruidDataSource;
+import io.seata.rm.datasource.DataSourceProxy;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.core.env.Environment;
 import org.springframework.jdbc.core.JdbcTemplate;

-import com.alibaba.druid.pool.DruidDataSource;
-import com.alibaba.fescar.rm.datasource.DataSourceProxy;
-
 /**
  * @author xiaojing
  */
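For context on this import swap: the example's configuration class wraps the Druid pool in Seata's DataSourceProxy so AT mode can intercept JDBC calls and record undo logs. A minimal sketch of that wiring (bean name, JDBC URL, and credentials are illustrative, not taken from this diff):

```java
import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class DataSourceConfiguration {

    @Bean
    public DataSource dataSource() {
        DruidDataSource druid = new DruidDataSource();
        druid.setUrl("jdbc:mysql://127.0.0.1:3306/demo"); // illustrative URL
        druid.setUsername("root");
        druid.setPassword("password");
        // Seata AT mode records undo logs by proxying every JDBC operation.
        return new DataSourceProxy(druid);
    }
}
```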
@@ -20,17 +20,26 @@ transport {
   worker-thread-size = 8
 }
 }
+store {
+  # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+  max-branch-session-size = 16384
+  # globe session size , if exceeded throws exceptions
+  max-global-session-size = 512
+  # file buffer size , if exceeded allocate new buffer
+  file-write-buffer-cache-size = 16384
+  # when recover batch read size
+  session.reload.read_size = 100
+}
 service {
   #vgroup->rgroup
-  vgroup_mapping.account-service-fescar-service-group = "localRgroup"
+  vgroup_mapping.account-service-fescar-service-group = "default"
   #only support single node
-  localRgroup.grouplist = "127.0.0.1:8091"
+  default.grouplist = "127.0.0.1:8091"
   #degrade current not support
   enableDegrade = false
   #disable
   disable = false
 }

 client {
   async.commit.buffer.limit = 10000
   lock {
@ -1,5 +1,5 @@
|
||||
registry {
|
||||
# file 、nacos 、redis
|
||||
# file 、nacos 、eureka、redis、zk、consul
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@ -7,17 +7,32 @@ registry {
|
||||
namespace = "public"
|
||||
cluster = "default"
|
||||
}
|
||||
eureka {
|
||||
serviceUrl = "http://localhost:1001/eureka"
|
||||
application = "default"
|
||||
weight = "1"
|
||||
}
|
||||
redis {
|
||||
serverAddr = "localhost:6379"
|
||||
db = "0"
|
||||
}
|
||||
zk {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:2181"
|
||||
session.timeout = 6000
|
||||
connect.timeout = 2000
|
||||
}
|
||||
consul {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:8500"
|
||||
}
|
||||
file {
|
||||
name = "file.conf"
|
||||
}
|
||||
}
|
||||
|
||||
config {
|
||||
# file nacos apollo
|
||||
# file、nacos 、apollo、zk
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@@ -29,6 +44,11 @@ config {
   app.id = "fescar-server"
   apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+  serverAddr = "127.0.0.1:2181"
+  session.timeout = 6000
+  connect.timeout = 2000
+}
 file {
   name = "file.conf"
 }
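With type = "file" everything stays local; handing registry and configuration over to Nacos only requires changing the type field. A sketch assuming a local Nacos server (the serverAddr value is illustrative; the other keys mirror the nacos block above):

```
registry {
  type = "nacos"

  nacos {
    serverAddr = "localhost:8848"
    namespace = "public"
    cluster = "default"
  }
}
```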
@@ -16,7 +16,7 @@

 package org.springframework.cloud.alibaba.cloud.examples;

-import com.alibaba.fescar.spring.annotation.GlobalTransactional;
+import io.seata.spring.annotation.GlobalTransactional;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
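After the package move the annotation is used exactly as before: one @GlobalTransactional method opens the global transaction that downstream branches join. A minimal sketch (service and method names are illustrative, not from this commit):

```java
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

@Service
public class BusinessService {

    // Begins a global transaction; if any branch (order, storage, account)
    // fails, Seata rolls all of them back using the recorded undo logs.
    @GlobalTransactional(timeoutMills = 300000, name = "spring-cloud-demo-tx")
    public void purchase(String userId, String commodityCode, int count) {
        // call the order/storage services via RestTemplate or Feign here
    }
}
```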
@@ -20,17 +20,46 @@ transport {
   worker-thread-size = 8
 }
 }
+## transaction log store
+store {
+  ## store mode: file、db
+  mode = "file"
+
+  ## file store
+  file {
+    dir = "sessionStore"
+
+    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+    max-branch-session-size = 16384
+    # globe session size , if exceeded throws exceptions
+    max-global-session-size = 512
+    # file buffer size , if exceeded allocate new buffer
+    file-write-buffer-cache-size = 16384
+    # when recover batch read size
+    session.reload.read_size = 100
+    # async, sync
+    flush-disk-mode = async
+  }
+
+  ## database store
+  db {
+    driver_class = ""
+    url = ""
+    user = ""
+    password = ""
+  }
+
+}
 service {
   #vgroup->rgroup
-  vgroup_mapping.business-service-fescar-service-group = "localRgroup"
+  vgroup_mapping.business-service-fescar-service-group = "default"
   #only support single node
-  localRgroup.grouplist = "127.0.0.1:8091"
+  default.grouplist = "127.0.0.1:8091"
   #degrade current not support
   enableDegrade = false
   #disable
   disable = false
 }

 client {
   async.commit.buffer.limit = 10000
   lock {
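The empty db block above is a placeholder; switching mode to "db" would mean filling it in. A sketch with illustrative MySQL values (these are assumptions, not part of this commit):

```
store {
  mode = "db"

  db {
    driver_class = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://127.0.0.1:3306/seata"
    user = "root"
    password = "password"
  }
}
```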
@ -1,5 +1,5 @@
|
||||
registry {
|
||||
# file 、nacos 、redis
|
||||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@ -7,17 +7,45 @@ registry {
|
||||
namespace = "public"
|
||||
cluster = "default"
|
||||
}
|
||||
eureka {
|
||||
serviceUrl = "http://localhost:1001/eureka"
|
||||
application = "default"
|
||||
weight = "1"
|
||||
}
|
||||
redis {
|
||||
serverAddr = "localhost:6379"
|
||||
db = "0"
|
||||
}
|
||||
zk {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:2181"
|
||||
session.timeout = 6000
|
||||
connect.timeout = 2000
|
||||
}
|
||||
consul {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:8500"
|
||||
}
|
||||
etcd3 {
|
||||
cluster = "default"
|
||||
serverAddr = "http://localhost:2379"
|
||||
}
|
||||
sofa {
|
||||
serverAddr = "127.0.0.1:9603"
|
||||
application = "default"
|
||||
region = "DEFAULT_ZONE"
|
||||
datacenter = "DefaultDataCenter"
|
||||
cluster = "default"
|
||||
group = "SEATA_GROUP"
|
||||
addressWaitTime = "3000"
|
||||
}
|
||||
file {
|
||||
name = "file.conf"
|
||||
}
|
||||
}
|
||||
|
||||
config {
|
||||
# file nacos apollo
|
||||
# file、nacos 、apollo、zk
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@@ -26,9 +54,14 @@ config {
   cluster = "default"
 }
 apollo {
-  app.id = "fescar-server"
+  app.id = "seata-server"
   apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+  serverAddr = "127.0.0.1:2181"
+  session.timeout = 6000
+  connect.timeout = 2000
+}
 file {
   name = "file.conf"
 }
@@ -24,7 +24,7 @@ import org.springframework.core.env.Environment;
 import org.springframework.jdbc.core.JdbcTemplate;

 import com.alibaba.druid.pool.DruidDataSource;
-import com.alibaba.fescar.rm.datasource.DataSourceProxy;
+import io.seata.rm.datasource.DataSourceProxy;

 /**
  * @author xiaojing
@@ -16,7 +16,7 @@

 package org.springframework.cloud.alibaba.cloud.examples;

-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -20,17 +20,46 @@ transport {
   worker-thread-size = 8
 }
 }
+## transaction log store
+store {
+  ## store mode: file、db
+  mode = "file"
+
+  ## file store
+  file {
+    dir = "sessionStore"
+
+    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+    max-branch-session-size = 16384
+    # globe session size , if exceeded throws exceptions
+    max-global-session-size = 512
+    # file buffer size , if exceeded allocate new buffer
+    file-write-buffer-cache-size = 16384
+    # when recover batch read size
+    session.reload.read_size = 100
+    # async, sync
+    flush-disk-mode = async
+  }
+
+  ## database store
+  db {
+    driver_class = ""
+    url = ""
+    user = ""
+    password = ""
+  }
+
+}
 service {
   #vgroup->rgroup
-  vgroup_mapping.order-service-fescar-service-group = "localRgroup"
+  vgroup_mapping.order-service-fescar-service-group = "default"
   #only support single node
-  localRgroup.grouplist = "127.0.0.1:8091"
+  default.grouplist = "127.0.0.1:8091"
   #degrade current not support
   enableDegrade = false
   #disable
   disable = false
 }

 client {
   async.commit.buffer.limit = 10000
   lock {
@ -1,5 +1,5 @@
|
||||
registry {
|
||||
# file 、nacos 、redis
|
||||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@ -7,17 +7,45 @@ registry {
|
||||
namespace = "public"
|
||||
cluster = "default"
|
||||
}
|
||||
eureka {
|
||||
serviceUrl = "http://localhost:1001/eureka"
|
||||
application = "default"
|
||||
weight = "1"
|
||||
}
|
||||
redis {
|
||||
serverAddr = "localhost:6379"
|
||||
db = "0"
|
||||
}
|
||||
zk {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:2181"
|
||||
session.timeout = 6000
|
||||
connect.timeout = 2000
|
||||
}
|
||||
consul {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:8500"
|
||||
}
|
||||
etcd3 {
|
||||
cluster = "default"
|
||||
serverAddr = "http://localhost:2379"
|
||||
}
|
||||
sofa {
|
||||
serverAddr = "127.0.0.1:9603"
|
||||
application = "default"
|
||||
region = "DEFAULT_ZONE"
|
||||
datacenter = "DefaultDataCenter"
|
||||
cluster = "default"
|
||||
group = "SEATA_GROUP"
|
||||
addressWaitTime = "3000"
|
||||
}
|
||||
file {
|
||||
name = "file.conf"
|
||||
}
|
||||
}
|
||||
|
||||
config {
|
||||
# file nacos apollo
|
||||
# file、nacos 、apollo、zk
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@@ -26,9 +54,14 @@ config {
   cluster = "default"
 }
 apollo {
-  app.id = "fescar-server"
+  app.id = "seata-server"
   apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+  serverAddr = "127.0.0.1:2181"
+  session.timeout = 6000
+  connect.timeout = 2000
+}
 file {
   name = "file.conf"
 }
@@ -19,14 +19,14 @@

 1. Create the database tables needed by the businesses in the example

-1. Start Fescar Server
+1. Start Seata Server


 ### Configure the database

 First, you need a MySQL database that supports the InnoDB engine.

-**Note**: In fact, Fescar supports different applications using completely unrelated databases, but here we chose a single database in order to demonstrate the principle simply.
+**Note**: In fact, Seata supports different applications using completely unrelated databases, but here we chose a single database in order to demonstrate the principle simply.

 Change the following settings in the `application.properties` file under the resources directory of the three applications `account-server`, `order-service`, and `storage-service` to the actual values of your environment.
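The next hunk header confirms the mysql.user.password key; the surrounding keys are assumed here for illustration. A sketch of the block to adjust:

```
mysql.server.ip=127.0.0.1
mysql.server.port=3306
mysql.db.name=demo
mysql.user.name=root
mysql.user.password=your mysql server password
```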
@@ -42,7 +42,7 @@ mysql.user.password=your mysql server password

 ### Create the undo_log table

-[Fescar AT mode]() uses the undo_log table.
+[Seata AT mode]() uses the undo_log table.

 ``` $sql
 -- note: 0.3.0+ adds the unique index ux_undo_log
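The SQL is cut off by the hunk boundary. For reference, the undo_log DDL of that era looked roughly like the sketch below; column details vary between versions, so treat it as an assumption rather than this commit's exact script:

```sql
CREATE TABLE `undo_log` (
  `id` BIGINT(20) NOT NULL AUTO_INCREMENT,
  `branch_id` BIGINT(20) NOT NULL,
  `xid` VARCHAR(100) NOT NULL,
  `rollback_info` LONGBLOB NOT NULL,
  `log_status` INT(11) NOT NULL,
  `log_created` DATETIME NOT NULL,
  `log_modified` DATETIME NOT NULL,
  `ext` VARCHAR(100) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```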
@@ -93,24 +93,24 @@ CREATE TABLE `account_tbl` (
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 ```

-### Start Fescar Server
+### Start Seata Server

-Open this page [https://github.com/alibaba/fescar/releases](https://github.com/alibaba/fescar/releases) and download the latest version of the Fescar Server.
+Open this page [https://github.com/seata/seata/releases](https://github.com/seata/seata/releases) and download the latest version of the Seata Server.


 Go into the bin directory of the unpacked archive and run the following command to start it

 ```$shell
-sh fescar-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA
+sh seata-server.sh $LISTEN_PORT $MODE(file or db)
 ```

-In this example, Fescar Server is started with the following command
+In this example, Seata Server is started with the following command

 ```$shell
-sh fescar-server.sh 8091 ~/fescar/data/
+sh seata-server.sh 8091 file
 ```

-**Note** If you changed the port number, remember to update the grouplist value in the `application.conf` file of each example project.
+**Note** If you changed the endpoint and the registry uses the default file type, remember to update the grouplist value in the `file.conf` file of each example project (when registry.type or config.type in registry.conf is file, the file name in the inner file node is read; if the type is not file, the data is read directly from the registry/configuration center of the configured type). We recommend using nacos as the configuration registry.


 ## Run the example
@@ -146,12 +146,12 @@ http://127.0.0.1:18081/fescar/rest

 ## Supported Spring Cloud integration points

-- A service provider built on Spring MVC can automatically restore the Fescar context when it receives an HTTP request whose header carries Fescar information.
+- A service provider built on Spring MVC can automatically restore the Seata context when it receives an HTTP request whose header carries Seata information.

-- Supports automatically propagating the Fescar context when a consumer calls through RestTemplate.
+- Supports automatically propagating the Seata context when a consumer calls through RestTemplate.

-- Supports automatically propagating the Fescar context when a consumer calls through FeignClient.
+- Supports automatically propagating the Seata context when a consumer calls through FeignClient.

-- Supports scenarios where FeignClient and Hystrix are used together.
+- Supports scenarios where SeataClient and Hystrix are used together.

-- Supports scenarios where FeignClient and Sentinel are used together.
+- Supports scenarios where SeataClient and Sentinel are used together.
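Context propagation in all of these integration points boils down to carrying the transaction id (XID) in an HTTP header. A simplified sketch of the mechanism; the real logic lives in SeataRestTemplateInterceptor and SeataHandlerInterceptor, shown later in this diff:

```java
import io.seata.core.context.RootContext;

public class XidPropagationSketch {

    // Caller side: the interceptor reads the current XID and attaches it to
    // the outgoing request under the RootContext.KEY_XID header.
    public String headerValueForOutgoingCall() {
        return RootContext.getXID();
    }

    // Provider side: the handler interceptor binds the received XID so the
    // provider's branch transactions join the caller's global transaction.
    public void onIncomingRequest(String rpcXid) {
        if (rpcXid != null) {
            RootContext.bind(rpcXid);
        }
    }
}
```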
@@ -19,7 +19,7 @@ package org.springframework.cloud.alibaba.cloud.examples;
 import java.sql.SQLException;

 import com.alibaba.druid.pool.DruidDataSource;
-import com.alibaba.fescar.rm.datasource.DataSourceProxy;
+import io.seata.rm.datasource.DataSourceProxy;

 import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;
@@ -16,7 +16,7 @@

 package org.springframework.cloud.alibaba.cloud.examples;

-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -20,17 +20,46 @@ transport {
   worker-thread-size = 8
 }
 }
+## transaction log store
+store {
+  ## store mode: file、db
+  mode = "file"
+
+  ## file store
+  file {
+    dir = "sessionStore"
+
+    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+    max-branch-session-size = 16384
+    # globe session size , if exceeded throws exceptions
+    max-global-session-size = 512
+    # file buffer size , if exceeded allocate new buffer
+    file-write-buffer-cache-size = 16384
+    # when recover batch read size
+    session.reload.read_size = 100
+    # async, sync
+    flush-disk-mode = async
+  }
+
+  ## database store
+  db {
+    driver_class = ""
+    url = ""
+    user = ""
+    password = ""
+  }
+
+}
 service {
   #vgroup->rgroup
-  vgroup_mapping.storage-service-fescar-service-group = "localRgroup"
+  vgroup_mapping.storage-service-fescar-service-group = "default"
   #only support single node
-  localRgroup.grouplist = "127.0.0.1:8091"
+  default.grouplist = "127.0.0.1:8091"
   #degrade current not support
   enableDegrade = false
   #disable
   disable = false
 }

 client {
   async.commit.buffer.limit = 10000
   lock {
@ -1,5 +1,5 @@
|
||||
registry {
|
||||
# file 、nacos 、redis
|
||||
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@ -7,17 +7,45 @@ registry {
|
||||
namespace = "public"
|
||||
cluster = "default"
|
||||
}
|
||||
eureka {
|
||||
serviceUrl = "http://localhost:1001/eureka"
|
||||
application = "default"
|
||||
weight = "1"
|
||||
}
|
||||
redis {
|
||||
serverAddr = "localhost:6379"
|
||||
db = "0"
|
||||
}
|
||||
zk {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:2181"
|
||||
session.timeout = 6000
|
||||
connect.timeout = 2000
|
||||
}
|
||||
consul {
|
||||
cluster = "default"
|
||||
serverAddr = "127.0.0.1:8500"
|
||||
}
|
||||
etcd3 {
|
||||
cluster = "default"
|
||||
serverAddr = "http://localhost:2379"
|
||||
}
|
||||
sofa {
|
||||
serverAddr = "127.0.0.1:9603"
|
||||
application = "default"
|
||||
region = "DEFAULT_ZONE"
|
||||
datacenter = "DefaultDataCenter"
|
||||
cluster = "default"
|
||||
group = "SEATA_GROUP"
|
||||
addressWaitTime = "3000"
|
||||
}
|
||||
file {
|
||||
name = "file.conf"
|
||||
}
|
||||
}
|
||||
|
||||
config {
|
||||
# file nacos apollo
|
||||
# file、nacos 、apollo、zk
|
||||
type = "file"
|
||||
|
||||
nacos {
|
||||
@@ -26,9 +54,14 @@ config {
   cluster = "default"
 }
 apollo {
-  app.id = "fescar-server"
+  app.id = "seata-server"
   apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+  serverAddr = "127.0.0.1:2181"
+  session.timeout = 6000
+  connect.timeout = 2000
+}
 file {
   name = "file.conf"
 }
@@ -28,25 +28,25 @@ import java.util.concurrent.ConcurrentHashMap;
  */
 public class NacosPropertySourceRepository {

-    private final static ConcurrentHashMap<String, NacosPropertySource> nacosPropertySourceRepository = new ConcurrentHashMap<>();
+    private final static ConcurrentHashMap<String, NacosPropertySource> NACOS_PROPERTY_SOURCE_REPOSITORY = new ConcurrentHashMap<>();

     /**
      * @return all nacos properties from application context
      */
     public static List<NacosPropertySource> getAll() {
         List<NacosPropertySource> result = new ArrayList<>();
-        result.addAll(nacosPropertySourceRepository.values());
+        result.addAll(NACOS_PROPERTY_SOURCE_REPOSITORY.values());
         return result;
     }

     public static void collectNacosPropertySources(
             NacosPropertySource nacosPropertySource) {
-        nacosPropertySourceRepository.putIfAbsent(nacosPropertySource.getDataId(),
+        NACOS_PROPERTY_SOURCE_REPOSITORY.putIfAbsent(nacosPropertySource.getDataId(),
                 nacosPropertySource);
     }

     public static NacosPropertySource getNacosPropertySource(String dataId) {

-        return nacosPropertySourceRepository.get(dataId);
+        return NACOS_PROPERTY_SOURCE_REPOSITORY.get(dataId);
     }
 }
@@ -16,13 +16,6 @@

 package org.springframework.cloud.alibaba.nacos.endpoint;

-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.springframework.boot.actuate.endpoint.annotation.Endpoint;
 import org.springframework.boot.actuate.endpoint.annotation.ReadOperation;
 import org.springframework.cloud.alibaba.nacos.NacosConfigProperties;
@@ -30,6 +23,13 @@ import org.springframework.cloud.alibaba.nacos.NacosPropertySourceRepository;
 import org.springframework.cloud.alibaba.nacos.client.NacosPropertySource;
 import org.springframework.cloud.alibaba.nacos.refresh.NacosRefreshHistory;

+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 /**
  * Endpoint for Nacos, contains config data and refresh history
  * @author xiaojing
@@ -118,57 +118,10 @@ public class NacosWatch implements ApplicationEventPublisherAware, SmartLifecycle {
     }

     public void nacosServicesWatch() {
         try {

-            boolean changed = false;
-            NamingService namingService = properties.namingServiceInstance();
+            // nacos doesn't support watch now , publish an event every 30 seconds.
+            this.publisher.publishEvent(
+                    new HeartbeatEvent(this, nacosWatchIndex.getAndIncrement()));

-            ListView<String> listView = properties.namingServiceInstance()
-                    .getServicesOfServer(1, Integer.MAX_VALUE);
-
-            List<String> serviceList = listView.getData();
-
-            // if there are new services found, publish event
-            Set<String> currentServices = new HashSet<>(serviceList);
-            currentServices.removeAll(cacheServices);
-            if (currentServices.size() > 0) {
-                changed = true;
-            }
-
-            // if some services disappear, publish event
-            if (cacheServices.removeAll(new HashSet<>(serviceList))
-                    && cacheServices.size() > 0) {
-                changed = true;
-
-                for (String serviceName : cacheServices) {
-                    namingService.unsubscribe(serviceName,
-                            subscribeListeners.get(serviceName));
-                    subscribeListeners.remove(serviceName);
-                }
-            }
-
-            cacheServices = new HashSet<>(serviceList);
-
-            // subscribe services's node change, publish event if nodes changed
-            for (String serviceName : cacheServices) {
-                if (!subscribeListeners.containsKey(serviceName)) {
-                    EventListener eventListener = event -> NacosWatch.this.publisher
-                            .publishEvent(new HeartbeatEvent(NacosWatch.this,
-                                    nacosWatchIndex.getAndIncrement()));
-                    subscribeListeners.put(serviceName, eventListener);
-                    namingService.subscribe(serviceName, eventListener);
-
-                }
-            }
-
-            if (changed) {
-                this.publisher.publishEvent(
-                        new HeartbeatEvent(this, nacosWatchIndex.getAndIncrement()));
-            }
-
         }
         catch (Exception e) {
             log.error("Error watching Nacos Service change", e);
         }
     }
 }
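For context: nacosServicesWatch runs on a fixed schedule, so after this simplification every tick simply republishes a heartbeat and discovery caches refresh even without server-side push. The real class schedules through Spring's TaskScheduler; the sketch below only illustrates the idea with a plain executor (class and method names are illustrative):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class WatchSchedulingSketch {

    private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();

    // Fire the watch callback every 30 seconds, mirroring the comment in the
    // hunk above ("publish an event every 30 seconds").
    public void start(Runnable nacosServicesWatch) {
        scheduler.scheduleWithFixedDelay(nacosServicesWatch, 30, 30, TimeUnit.SECONDS);
    }
}
```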
@@ -16,8 +16,8 @@
     <dependencies>

         <dependency>
-            <groupId>com.alibaba.fescar</groupId>
-            <artifactId>fescar-spring</artifactId>
+            <groupId>io.seata</groupId>
+            <artifactId>seata-spring</artifactId>
         </dependency>

         <dependency>
@@ -16,7 +16,7 @@

 package org.springframework.cloud.alibaba.seata;

-import com.alibaba.fescar.spring.annotation.GlobalTransactionScanner;
+import io.seata.spring.annotation.GlobalTransactionScanner;

 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.ApplicationContext;
@@ -23,13 +23,11 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

-import com.alibaba.fescar.core.context.RootContext;
-
-import org.springframework.beans.factory.BeanFactory;
-
 import feign.Client;
 import feign.Request;
 import feign.Response;
+import io.seata.core.context.RootContext;
+import org.springframework.beans.factory.BeanFactory;
 import org.springframework.util.StringUtils;

 /**
@@ -37,49 +35,49 @@ import org.springframework.util.StringUtils;
  */
 public class SeataFeignClient implements Client {

-    private final Client delegate;
-    private final BeanFactory beanFactory;
+    private final Client delegate;
+    private final BeanFactory beanFactory;
+    private static final int MAP_SIZE = 16;

-    SeataFeignClient(BeanFactory beanFactory) {
-        this.beanFactory = beanFactory;
-        this.delegate = new Client.Default(null, null);
-    }
+    SeataFeignClient(BeanFactory beanFactory) {
+        this.beanFactory = beanFactory;
+        this.delegate = new Client.Default(null, null);
+    }

-    SeataFeignClient(BeanFactory beanFactory, Client delegate) {
-        this.delegate = delegate;
-        this.beanFactory = beanFactory;
-    }
+    SeataFeignClient(BeanFactory beanFactory, Client delegate) {
+        this.delegate = delegate;
+        this.beanFactory = beanFactory;
+    }

-    @Override
-    public Response execute(Request request, Request.Options options) throws IOException {
+    @Override
+    public Response execute(Request request, Request.Options options) throws IOException {

-        Request modifiedRequest = getModifyRequest(request);
+        Request modifiedRequest = getModifyRequest(request);

-        try {
-            return this.delegate.execute(modifiedRequest, options);
-        }
-        finally {
+        try {
+            return this.delegate.execute(modifiedRequest, options);
+        } finally {

-        }
-    }
+        }
+    }

-    private Request getModifyRequest(Request request) {
+    private Request getModifyRequest(Request request) {

-        String xid = RootContext.getXID();
+        String xid = RootContext.getXID();

-        if (StringUtils.isEmpty(xid)) {
-            return request;
-        }
+        if (StringUtils.isEmpty(xid)) {
+            return request;
+        }

-        Map<String, Collection<String>> headers = new HashMap<>();
-        headers.putAll(request.headers());
+        Map<String, Collection<String>> headers = new HashMap<>(MAP_SIZE);
+        headers.putAll(request.headers());

-        List<String> fescarXid = new ArrayList<>();
-        fescarXid.add(xid);
-        headers.put(RootContext.KEY_XID, fescarXid);
+        List<String> fescarXid = new ArrayList<>();
+        fescarXid.add(xid);
+        headers.put(RootContext.KEY_XID, fescarXid);

-        return Request.create(request.method(), request.url(), headers, request.body(),
-                request.charset());
-    }
+        return Request.create(request.method(), request.url(), headers, request.body(),
+                request.charset());
+    }

 }
@@ -65,19 +65,19 @@ public class SeataFeignClientAutoConfiguration {
     protected static class FeignBeanPostProcessorConfiguration {

         @Bean
-        SeataBeanPostProcessor fescarBeanPostProcessor(
+        SeataBeanPostProcessor seataBeanPostProcessor(
                 SeataFeignObjectWrapper seataFeignObjectWrapper) {
             return new SeataBeanPostProcessor(seataFeignObjectWrapper);
         }

         @Bean
-        SeataContextBeanPostProcessor fescarContextBeanPostProcessor(
+        SeataContextBeanPostProcessor seataContextBeanPostProcessor(
                 BeanFactory beanFactory) {
             return new SeataContextBeanPostProcessor(beanFactory);
         }

         @Bean
-        SeataFeignObjectWrapper fescarFeignObjectWrapper(BeanFactory beanFactory) {
+        SeataFeignObjectWrapper seataFeignObjectWrapper(BeanFactory beanFactory) {
             return new SeataFeignObjectWrapper(beanFactory);
         }
     }
@@ -30,7 +30,7 @@ import com.netflix.hystrix.HystrixCommand;
 public class SeataHystrixAutoConfiguration {

     @Bean
-    SeataHystrixConcurrencyStrategy fescarHystrixConcurrencyStrategy() {
+    SeataHystrixConcurrencyStrategy seataHystrixConcurrencyStrategy() {
         return new SeataHystrixConcurrencyStrategy();
     }

@@ -17,7 +17,7 @@ package org.springframework.cloud.alibaba.seata.feign.hystrix;

 import java.util.concurrent.Callable;

-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;

 import com.netflix.hystrix.strategy.HystrixPlugins;
 import com.netflix.hystrix.strategy.concurrency.HystrixConcurrencyStrategy;
@@ -37,7 +37,7 @@ public class SeataHystrixConcurrencyStrategy extends HystrixConcurrencyStrategy

     @Override
     public <K> Callable<K> wrapCallable(Callable<K> c) {
-        if (c instanceof FescarContextCallable) {
+        if (c instanceof SeataContextCallable) {
             return c;
         }

@@ -48,19 +48,19 @@ public class SeataHystrixConcurrencyStrategy extends HystrixConcurrencyStrategy
         else {
             wrappedCallable = c;
         }
-        if (wrappedCallable instanceof FescarContextCallable) {
+        if (wrappedCallable instanceof SeataContextCallable) {
             return wrappedCallable;
         }

-        return new FescarContextCallable<>(wrappedCallable);
+        return new SeataContextCallable<>(wrappedCallable);
     }

-    private static class FescarContextCallable<K> implements Callable<K> {
+    private static class SeataContextCallable<K> implements Callable<K> {

         private final Callable<K> actual;
         private final String xid;

-        FescarContextCallable(Callable<K> actual) {
+        SeataContextCallable(Callable<K> actual) {
             this.actual = actual;
             this.xid = RootContext.getXID();
         }

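The rest of SeataContextCallable sits outside this hunk; its call() presumably rebinds the captured XID around the wrapped task so the Hystrix worker thread sees the caller's transaction. A sketch under that assumption (not the class's exact body):

```java
import java.util.concurrent.Callable;

import io.seata.core.context.RootContext;

// A sketch: rebind the captured XID on the executing thread, run the task,
// then unbind so the pooled thread does not leak transaction context.
final class ContextCallableSketch<K> implements Callable<K> {

    private final Callable<K> actual;
    private final String xid = RootContext.getXID();

    ContextCallableSketch(Callable<K> actual) {
        this.actual = actual;
    }

    @Override
    public K call() throws Exception {
        if (xid != null) {
            RootContext.bind(xid);
        }
        try {
            return actual.call();
        }
        finally {
            RootContext.unbind();
        }
    }
}
```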
@@ -50,7 +50,7 @@ import java.util.List;
 public class SeataRestTemplateAutoConfiguration {

     @Bean
-    public SeataRestTemplateInterceptor fescarRestTemplateInterceptor() {
+    public SeataRestTemplateInterceptor seataRestTemplateInterceptor() {
         return new SeataRestTemplateInterceptor();
     }

@@ -18,7 +18,7 @@ package org.springframework.cloud.alibaba.seata.rest;

 import java.io.IOException;

-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;

 import org.springframework.http.HttpRequest;
 import org.springframework.http.client.ClientHttpRequestExecution;
@@ -19,7 +19,7 @@ package org.springframework.cloud.alibaba.seata.web;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;

-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -29,10 +29,10 @@ import org.springframework.web.servlet.HandlerInterceptor;
 /**
  * @author xiaojing
  *
- * Fescar HandlerInterceptor, Convert Fescar information into
- * @see com.alibaba.fescar.core.context.RootContext from http request's header in
+ * Seata HandlerInterceptor, Convert Seata information into
+ * @see io.seata.core.context.RootContext from http request's header in
  * {@link org.springframework.web.servlet.HandlerInterceptor#preHandle(HttpServletRequest , HttpServletResponse , Object )},
- * And clean up Fescar information after servlet method invocation in
+ * And clean up Seata information after servlet method invocation in
  * {@link org.springframework.web.servlet.HandlerInterceptor#afterCompletion(HttpServletRequest, HttpServletResponse, Object, Exception)}
  */
 public class SeataHandlerInterceptor implements HandlerInterceptor {
@@ -42,7 +42,7 @@ public class SeataHandlerInterceptor implements HandlerInterceptor {

     @Override
     public boolean preHandle(HttpServletRequest request, HttpServletResponse response,
-            Object handler) throws Exception {
+            Object handler) {

         String xid = RootContext.getXID();
         String rpcXid = request.getHeader(RootContext.KEY_XID);
@@ -61,7 +61,7 @@ public class SeataHandlerInterceptor implements HandlerInterceptor {

     @Override
     public void afterCompletion(HttpServletRequest request, HttpServletResponse response,
-            Object handler, Exception e) throws Exception {
+            Object handler, Exception e) {

         String rpcXid = request.getHeader(RootContext.KEY_XID);

@@ -16,13 +16,6 @@

 package org.springframework.cloud.alicloud.acm.endpoint;

-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.springframework.boot.actuate.endpoint.annotation.Endpoint;
 import org.springframework.boot.actuate.endpoint.annotation.ReadOperation;
 import org.springframework.cloud.alicloud.acm.AcmPropertySourceRepository;
@@ -30,6 +23,13 @@ import org.springframework.cloud.alicloud.acm.bootstrap.AcmPropertySource;
 import org.springframework.cloud.alicloud.acm.refresh.AcmRefreshHistory;
 import org.springframework.cloud.alicloud.context.acm.AcmProperties;

+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 /**
  * Created on 01/10/2017.
  *
@@ -12,12 +12,12 @@ public class SmsProperties {
     /**
      * Product name.
      */
-    public static final String smsProduct = "Dysmsapi";
+    public static final String SMS_PRODUCT = "Dysmsapi";

     /**
      * Product domain.
      */
-    public static final String smsDomain = "dysmsapi.aliyuncs.com";
+    public static final String SMS_DOMAIN = "dysmsapi.aliyuncs.com";

     /**
      * Report queue name.
@@ -60,7 +60,7 @@ public class SmsInitializerEventListener
         // initialize acsClient; per-region configuration is not supported yet
         try {
             DefaultProfile.addEndpoint("cn-hangzhou", "cn-hangzhou",
-                    SmsProperties.smsProduct, SmsProperties.smsDomain);
+                    SmsProperties.SMS_PRODUCT, SmsProperties.SMS_DOMAIN);
             Collection<MessageListener> messageListeners = event.getApplicationContext()
                     .getBeansOfType(MessageListener.class).values();
             if (messageListeners.isEmpty()) {
@@ -85,8 +85,8 @@ public class DefaultAlicomMessagePuller {
         this.executorService = executorService;
     }

-    protected static final Map<String, Object> sLockObjMap = new HashMap<String, Object>();
-    protected static Map<String, Boolean> sPollingMap = new ConcurrentHashMap<String, Boolean>();
+    protected static final Map<String, Object> S_LOCK_OBJ_MAP = new HashMap<>();
+    protected static Map<String, Boolean> sPollingMap = new ConcurrentHashMap<>();
     protected Object lockObj;

     public boolean setPolling(String queueName) {
@@ -309,11 +309,11 @@ public class DefaultAlicomMessagePuller {
         task.messageType = messageType;
         task.queueName = queueName;

-        synchronized (sLockObjMap) {
-            lockObj = sLockObjMap.get(queueName);
+        synchronized (S_LOCK_OBJ_MAP) {
+            lockObj = S_LOCK_OBJ_MAP.get(queueName);
             if (lockObj == null) {
                 lockObj = new Object();
-                sLockObjMap.put(queueName, lockObj);
+                S_LOCK_OBJ_MAP.put(queueName, lockObj);
             }
         }

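The synchronized get-then-put above implements lazy per-queue lock creation. A hedged side note: were S_LOCK_OBJ_MAP a ConcurrentHashMap rather than a plain HashMap, the same pattern could collapse into one atomic call, as in this sketch (names illustrative):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LockRegistrySketch {

    private static final Map<String, Object> LOCKS = new ConcurrentHashMap<>();

    // Atomically create-or-fetch the per-queue lock object; equivalent to
    // the synchronized get/put in the hunk above, without external locking.
    public static Object lockFor(String queueName) {
        return LOCKS.computeIfAbsent(queueName, k -> new Object());
    }
}
```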
@@ -355,11 +355,11 @@ public class DefaultAlicomMessagePuller {
         task.messageType = messageType;
         task.queueName = queueName;

-        synchronized (sLockObjMap) {
-            lockObj = sLockObjMap.get(queueName);
+        synchronized (S_LOCK_OBJ_MAP) {
+            lockObj = S_LOCK_OBJ_MAP.get(queueName);
             if (lockObj == null) {
                 lockObj = new Object();
-                sLockObjMap.put(queueName, lockObj);
+                S_LOCK_OBJ_MAP.put(queueName, lockObj);
             }
         }

@@ -402,11 +402,11 @@ public class DefaultAlicomMessagePuller {
         task.messageType = messageType;
         task.queueName = queueName;

-        synchronized (sLockObjMap) {
-            lockObj = sLockObjMap.get(queueName);
+        synchronized (S_LOCK_OBJ_MAP) {
+            lockObj = S_LOCK_OBJ_MAP.get(queueName);
             if (lockObj == null) {
                 lockObj = new Object();
-                sLockObjMap.put(queueName, lockObj);
+                S_LOCK_OBJ_MAP.put(queueName, lockObj);
             }
         }

@@ -76,6 +76,7 @@ public class QueryTokenForMnsQueueRequest

     }

+    @Override
     public Class<QueryTokenForMnsQueueResponse> getResponseClass() {
         return QueryTokenForMnsQueueResponse.class;
     }
@@ -60,6 +60,7 @@ public class QueryTokenForMnsQueueResponse extends AcsResponse {
         this.messageTokenDTO = messageTokenDTO;
     }

+    @Override
     public QueryTokenForMnsQueueResponse getInstance(UnmarshallerContext context) {
         return QueryTokenForMnsQueueResponseUnmarshaller.unmarshall(this, context);
     }
@@ -50,7 +50,7 @@ public class TokenGetterForAlicom {
     private String domainForPop;
     private IAcsClient iAcsClient;
    private Long ownerId;
-    private final static String productName = "Dybaseapi";
+    private final static String PRODUCT_NAME = "Dybaseapi";
     private long bufferTime = 1000 * 60 * 2; // refresh when less than 2 minutes from expiry, to guard against server clock drift
     private final Object lock = new Object();
     private ConcurrentMap<String, TokenForAlicom> tokenMap = new ConcurrentHashMap<String, TokenForAlicom>();
@@ -68,7 +68,7 @@ public class TokenGetterForAlicom {
     }

     private void init() throws ClientException {
-        DefaultProfile.addEndpoint(endpointNameForPop, regionIdForPop, productName,
+        DefaultProfile.addEndpoint(endpointNameForPop, regionIdForPop, PRODUCT_NAME,
                 domainForPop);
         IClientProfile profile = DefaultProfile.getProfile(regionIdForPop, accessKeyId,
                 accessKeySecret);