
sync code from master

flystar32
2019-05-30 16:24:32 +08:00
parent c908e8c1cc
commit 8ddc5ee9a4
36 changed files with 367 additions and 199 deletions

View File

@@ -17,7 +17,7 @@ package org.springframework.cloud.alibaba.cloud.examples;
 import java.util.Random;
-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
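
The hunk above swaps the Fescar `RootContext` import for its Seata equivalent. For reference, a minimal sketch of how a service class typically reads the propagated transaction id with this API is shown below; the class and method names are illustrative, not taken from this repository:

```java
import io.seata.core.context.RootContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical service class, for illustration only.
public class AccountServiceSketch {

    private static final Logger LOGGER = LoggerFactory.getLogger(AccountServiceSketch.class);

    public void debit(String userId, int money) {
        // RootContext.getXID() returns the global transaction id bound to the
        // current thread, or null when no global transaction is active.
        LOGGER.info("Account Service ... xid: {}", RootContext.getXID());
        // ... business SQL goes through the Seata-proxied DataSource ...
    }
}
```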

View File

@@ -16,17 +16,16 @@
 package org.springframework.cloud.alibaba.cloud.examples;
 import java.sql.SQLException;
 import java.util.Random;
+import com.alibaba.druid.pool.DruidDataSource;
+import io.seata.rm.datasource.DataSourceProxy;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.core.env.Environment;
 import org.springframework.jdbc.core.JdbcTemplate;
-import com.alibaba.druid.pool.DruidDataSource;
-import com.alibaba.fescar.rm.datasource.DataSourceProxy;
 /**
  * @author xiaojing
  */
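
This hunk moves the example's database configuration onto `io.seata.rm.datasource.DataSourceProxy`. A minimal sketch of the usual wiring pattern follows, assuming illustrative `mysql.*` property keys and bean names rather than the exact code in this module:

```java
import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.jdbc.core.JdbcTemplate;

@Configuration
public class DatabaseConfigurationSketch {

    @Bean
    public DruidDataSource druidDataSource(Environment env) {
        // Property keys are illustrative; the example reads similar mysql.* keys
        // from application.properties.
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl(env.getProperty("mysql.url"));
        dataSource.setUsername(env.getProperty("mysql.user.name"));
        dataSource.setPassword(env.getProperty("mysql.user.password"));
        return dataSource;
    }

    @Bean
    public DataSourceProxy dataSourceProxy(DruidDataSource druidDataSource) {
        // Seata's AT mode wraps the real DataSource so every statement can be
        // intercepted and an undo_log record written for rollback.
        return new DataSourceProxy(druidDataSource);
    }

    @Bean
    public JdbcTemplate jdbcTemplate(DataSourceProxy dataSourceProxy) {
        // JDBC access must go through the proxy for the rollback records to be kept.
        return new JdbcTemplate(dataSourceProxy);
    }
}
```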

View File

@@ -20,17 +20,26 @@ transport {
 worker-thread-size = 8
 }
 }
+store {
+# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+max-branch-session-size = 16384
+# globe session size , if exceeded throws exceptions
+max-global-session-size = 512
+# file buffer size , if exceeded allocate new buffer
+file-write-buffer-cache-size = 16384
+# when recover batch read size
+session.reload.read_size = 100
+}
 service {
 #vgroup->rgroup
-vgroup_mapping.account-service-fescar-service-group = "localRgroup"
+vgroup_mapping.account-service-fescar-service-group = "default"
 #only support single node
-localRgroup.grouplist = "127.0.0.1:8091"
+default.grouplist = "127.0.0.1:8091"
 #degrade current not support
 enableDegrade = false
 #disable
 disable = false
 }
 client {
 async.commit.buffer.limit = 10000
 lock {

View File

@@ -1,5 +1,5 @@
 registry {
-# file 、nacos 、redis
+# file 、nacos 、eureka、redis、zk、consul
 type = "file"
 nacos {
@@ -7,17 +7,32 @@ registry {
 namespace = "public"
 cluster = "default"
 }
+eureka {
+serviceUrl = "http://localhost:1001/eureka"
+application = "default"
+weight = "1"
+}
 redis {
 serverAddr = "localhost:6379"
 db = "0"
 }
+zk {
+cluster = "default"
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
+consul {
+cluster = "default"
+serverAddr = "127.0.0.1:8500"
+}
 file {
 name = "file.conf"
 }
 }
 config {
-# file nacos apollo
+# file、nacos 、apollo、zk
 type = "file"
 nacos {
@@ -29,6 +44,11 @@ config {
 app.id = "fescar-server"
 apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
 file {
 name = "file.conf"
 }

View File

@@ -16,7 +16,7 @@
 package org.springframework.cloud.alibaba.cloud.examples;
-import com.alibaba.fescar.spring.annotation.GlobalTransactional;
+import io.seata.spring.annotation.GlobalTransactional;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
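
Here the example's business service switches to Seata's `@GlobalTransactional` annotation. The sketch below illustrates how such an annotated method typically spans calls to several services; the class name, ports, and URLs are assumptions for illustration only:

```java
import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestTemplate;

// Hypothetical business facade; names, ports, and URLs are illustrative only.
@Service
public class BusinessServiceSketch {

    private final RestTemplate restTemplate;

    public BusinessServiceSketch(RestTemplate restTemplate) {
        // Injected as a bean so the Seata integration can attach its XID-propagating interceptor.
        this.restTemplate = restTemplate;
    }

    // Opens a global transaction: the storage and order calls below commit or
    // roll back together even though they run in separate services.
    @GlobalTransactional(timeoutMills = 300000, name = "spring-cloud-demo-tx")
    public void purchase(String userId, String commodityCode, int count) {
        restTemplate.getForObject(
                "http://127.0.0.1:18082/storage/" + commodityCode + "/" + count, String.class);
        restTemplate.getForObject(
                "http://127.0.0.1:18083/order?userId=" + userId
                        + "&commodityCode=" + commodityCode + "&orderCount=" + count, String.class);
        // An exception thrown here causes Seata to roll back both branches.
    }
}
```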

View File

@@ -20,17 +20,46 @@ transport {
 worker-thread-size = 8
 }
 }
+## transaction log store
+store {
+## store mode: file、db
+mode = "file"
+## file store
+file {
+dir = "sessionStore"
+# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+max-branch-session-size = 16384
+# globe session size , if exceeded throws exceptions
+max-global-session-size = 512
+# file buffer size , if exceeded allocate new buffer
+file-write-buffer-cache-size = 16384
+# when recover batch read size
+session.reload.read_size = 100
+# async, sync
+flush-disk-mode = async
+}
+## database store
+db {
+driver_class = ""
+url = ""
+user = ""
+password = ""
+}
+}
 service {
 #vgroup->rgroup
-vgroup_mapping.business-service-fescar-service-group = "localRgroup"
+vgroup_mapping.business-service-fescar-service-group = "default"
 #only support single node
-localRgroup.grouplist = "127.0.0.1:8091"
+default.grouplist = "127.0.0.1:8091"
 #degrade current not support
 enableDegrade = false
 #disable
 disable = false
 }
 client {
 async.commit.buffer.limit = 10000
 lock {

View File

@@ -1,5 +1,5 @@
 registry {
-# file 、nacos 、redis
+# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
 type = "file"
 nacos {
@@ -7,17 +7,45 @@ registry {
 namespace = "public"
 cluster = "default"
 }
+eureka {
+serviceUrl = "http://localhost:1001/eureka"
+application = "default"
+weight = "1"
+}
 redis {
 serverAddr = "localhost:6379"
 db = "0"
 }
+zk {
+cluster = "default"
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
+consul {
+cluster = "default"
+serverAddr = "127.0.0.1:8500"
+}
+etcd3 {
+cluster = "default"
+serverAddr = "http://localhost:2379"
+}
+sofa {
+serverAddr = "127.0.0.1:9603"
+application = "default"
+region = "DEFAULT_ZONE"
+datacenter = "DefaultDataCenter"
+cluster = "default"
+group = "SEATA_GROUP"
+addressWaitTime = "3000"
+}
 file {
 name = "file.conf"
 }
 }
 config {
-# file nacos apollo
+# file、nacos 、apollo、zk
 type = "file"
 nacos {
@@ -26,9 +54,14 @@ config {
 cluster = "default"
 }
 apollo {
-app.id = "fescar-server"
+app.id = "seata-server"
 apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
 file {
 name = "file.conf"
 }

View File

@@ -24,7 +24,7 @@ import org.springframework.core.env.Environment;
 import org.springframework.jdbc.core.JdbcTemplate;
 import com.alibaba.druid.pool.DruidDataSource;
-import com.alibaba.fescar.rm.datasource.DataSourceProxy;
+import io.seata.rm.datasource.DataSourceProxy;
 /**
  * @author xiaojing

View File

@@ -16,7 +16,7 @@
 package org.springframework.cloud.alibaba.cloud.examples;
-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

View File

@@ -20,17 +20,46 @@ transport {
 worker-thread-size = 8
 }
 }
+## transaction log store
+store {
+## store mode: file、db
+mode = "file"
+## file store
+file {
+dir = "sessionStore"
+# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+max-branch-session-size = 16384
+# globe session size , if exceeded throws exceptions
+max-global-session-size = 512
+# file buffer size , if exceeded allocate new buffer
+file-write-buffer-cache-size = 16384
+# when recover batch read size
+session.reload.read_size = 100
+# async, sync
+flush-disk-mode = async
+}
+## database store
+db {
+driver_class = ""
+url = ""
+user = ""
+password = ""
+}
+}
 service {
 #vgroup->rgroup
-vgroup_mapping.order-service-fescar-service-group = "localRgroup"
+vgroup_mapping.order-service-fescar-service-group = "default"
 #only support single node
-localRgroup.grouplist = "127.0.0.1:8091"
+default.grouplist = "127.0.0.1:8091"
 #degrade current not support
 enableDegrade = false
 #disable
 disable = false
 }
 client {
 async.commit.buffer.limit = 10000
 lock {

View File

@@ -1,5 +1,5 @@
 registry {
-# file 、nacos 、redis
+# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
 type = "file"
 nacos {
@@ -7,17 +7,45 @@ registry {
 namespace = "public"
 cluster = "default"
 }
+eureka {
+serviceUrl = "http://localhost:1001/eureka"
+application = "default"
+weight = "1"
+}
 redis {
 serverAddr = "localhost:6379"
 db = "0"
 }
+zk {
+cluster = "default"
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
+consul {
+cluster = "default"
+serverAddr = "127.0.0.1:8500"
+}
+etcd3 {
+cluster = "default"
+serverAddr = "http://localhost:2379"
+}
+sofa {
+serverAddr = "127.0.0.1:9603"
+application = "default"
+region = "DEFAULT_ZONE"
+datacenter = "DefaultDataCenter"
+cluster = "default"
+group = "SEATA_GROUP"
+addressWaitTime = "3000"
+}
 file {
 name = "file.conf"
 }
 }
 config {
-# file nacos apollo
+# file、nacos 、apollo、zk
 type = "file"
 nacos {
@@ -26,9 +54,14 @@ config {
 cluster = "default"
 }
 apollo {
-app.id = "fescar-server"
+app.id = "seata-server"
 apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
 file {
 name = "file.conf"
 }

View File

@@ -19,14 +19,14 @@
 1. Create the database tables required by the services in the example
-1. Start the Fescar Server
+1. Start the Seata Server
 ### Configure the database
 First, you need a MySQL database that supports the InnoDB engine.
-**Note** In fact, Fescar allows different applications to use completely unrelated databases, but to keep the demonstration of the principle simple, we use only one database here.
+**Note** In fact, Seata allows different applications to use completely unrelated databases, but to keep the demonstration of the principle simple, we use only one database here.
 Change the following settings in the `application.properties` file under the resources directory of the `account-server`, `order-service`, and `storage-service` applications to match your actual environment.
@@ -42,7 +42,7 @@ mysql.user.password=your mysql server password
 ### Create the undo_log table
-[Fescar AT mode]() requires the undo_log table.
+[Seata AT mode]() requires the undo_log table.
 ``` $sql
 -- Note: 0.3.0+ adds the unique index ux_undo_log here
@@ -93,24 +93,24 @@ CREATE TABLE `account_tbl` (
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 ```
-### Start the Fescar Server
+### Start the Seata Server
-Open [https://github.com/alibaba/fescar/releases](https://github.com/alibaba/fescar/releases) and download the latest version of the Fescar Server.
+Open [https://github.com/seata/seata/releases](https://github.com/seata/seata/releases) and download the latest version of the Seata Server.
 Enter the bin directory of the extracted archive and run the following command to start it:
 ```$shell
-sh fescar-server.sh $LISTEN_PORT $PATH_FOR_PERSISTENT_DATA
+sh seata-server.sh $LISTEN_PORT $MODE(file or db)
 ```
-In this example, the Fescar Server is started with the following command:
+In this example, the Seata Server is started with the following command:
 ```$shell
-sh fescar-server.sh 8091 ~/fescar/data/
+sh seata-server.sh 8091 file
 ```
-**Note** If you change the port, remember to update the grouplist value in the `application.conf` file of each example project.
+**Note** If you change the endpoint and the registry uses the default file type, remember to update the grouplist value in the `file.conf` file of each example project (when `registry.type` or `config.type` in `registry.conf` is file, the file name in the inner file node is read; if the type is not file, the data is read directly from the registry/configuration center of the configured type). We recommend using Nacos as the configuration and registry center.
 ## Run the example
@@ -146,12 +146,12 @@ http://127.0.0.1:18081/fescar/rest
 ## Spring Cloud support points
-- A service provider exposed through Spring MVC can automatically restore the Fescar context when it receives an HTTP request whose headers carry Fescar information.
+- A service provider exposed through Spring MVC can automatically restore the Seata context when it receives an HTTP request whose headers carry Seata information.
-- The Fescar context is automatically propagated when a service consumer calls through RestTemplate.
+- The Seata context is automatically propagated when a service consumer calls through RestTemplate.
-- The Fescar context is automatically propagated when a service consumer calls through FeignClient.
+- The Seata context is automatically propagated when a service consumer calls through FeignClient.
-- Scenarios where FeignClient and Hystrix are used together are supported.
+- Scenarios where SeataClient and Hystrix are used together are supported.
-- Scenarios where FeignClient and Sentinel are used together are supported.
+- Scenarios where SeataClient and Sentinel are used together are supported.
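
To illustrate what "automatically restoring the Seata context" means in the bullets above, here is a rough, hypothetical sketch of an XID-restoring Spring MVC interceptor; the header name and cleanup details are assumptions, not the actual Spring Cloud Alibaba implementation:

```java
import io.seata.core.context.RootContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;

// Illustrative only: not the actual Spring Cloud Alibaba interceptor.
public class XidRestoringInterceptorSketch extends HandlerInterceptorAdapter {

    // Assumed header key used by the caller to carry the global transaction id.
    private static final String XID_HEADER = "TX_XID";

    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
        String xid = request.getHeader(XID_HEADER);
        if (xid != null && RootContext.getXID() == null) {
            // Bind the incoming XID so local SQL on this thread joins the global transaction.
            RootContext.bind(xid);
        }
        return true;
    }

    @Override
    public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) {
        String xid = request.getHeader(XID_HEADER);
        if (xid != null && xid.equals(RootContext.getXID())) {
            // Unbind what we bound so the XID does not leak across pooled threads.
            RootContext.unbind();
        }
    }
}
```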

View File

@@ -19,7 +19,7 @@ package org.springframework.cloud.alibaba.cloud.examples;
 import java.sql.SQLException;
 import com.alibaba.druid.pool.DruidDataSource;
-import com.alibaba.fescar.rm.datasource.DataSourceProxy;
+import io.seata.rm.datasource.DataSourceProxy;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;

View File

@@ -16,7 +16,7 @@
 package org.springframework.cloud.alibaba.cloud.examples;
-import com.alibaba.fescar.core.context.RootContext;
+import io.seata.core.context.RootContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

View File

@@ -20,17 +20,46 @@ transport {
 worker-thread-size = 8
 }
 }
+## transaction log store
+store {
+## store mode: file、db
+mode = "file"
+## file store
+file {
+dir = "sessionStore"
+# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
+max-branch-session-size = 16384
+# globe session size , if exceeded throws exceptions
+max-global-session-size = 512
+# file buffer size , if exceeded allocate new buffer
+file-write-buffer-cache-size = 16384
+# when recover batch read size
+session.reload.read_size = 100
+# async, sync
+flush-disk-mode = async
+}
+## database store
+db {
+driver_class = ""
+url = ""
+user = ""
+password = ""
+}
+}
 service {
 #vgroup->rgroup
-vgroup_mapping.storage-service-fescar-service-group = "localRgroup"
+vgroup_mapping.storage-service-fescar-service-group = "default"
 #only support single node
-localRgroup.grouplist = "127.0.0.1:8091"
+default.grouplist = "127.0.0.1:8091"
 #degrade current not support
 enableDegrade = false
 #disable
 disable = false
 }
 client {
 async.commit.buffer.limit = 10000
 lock {

View File

@@ -1,5 +1,5 @@
 registry {
-# file 、nacos 、redis
+# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
 type = "file"
 nacos {
@@ -7,17 +7,45 @@ registry {
 namespace = "public"
 cluster = "default"
 }
+eureka {
+serviceUrl = "http://localhost:1001/eureka"
+application = "default"
+weight = "1"
+}
 redis {
 serverAddr = "localhost:6379"
 db = "0"
 }
+zk {
+cluster = "default"
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
+consul {
+cluster = "default"
+serverAddr = "127.0.0.1:8500"
+}
+etcd3 {
+cluster = "default"
+serverAddr = "http://localhost:2379"
+}
+sofa {
+serverAddr = "127.0.0.1:9603"
+application = "default"
+region = "DEFAULT_ZONE"
+datacenter = "DefaultDataCenter"
+cluster = "default"
+group = "SEATA_GROUP"
+addressWaitTime = "3000"
+}
 file {
 name = "file.conf"
 }
 }
 config {
-# file nacos apollo
+# file、nacos 、apollo、zk
 type = "file"
 nacos {
@@ -26,9 +54,14 @@ config {
 cluster = "default"
 }
 apollo {
-app.id = "fescar-server"
+app.id = "seata-server"
 apollo.meta = "http://192.168.1.204:8801"
 }
+zk {
+serverAddr = "127.0.0.1:2181"
+session.timeout = 6000
+connect.timeout = 2000
+}
 file {
 name = "file.conf"
 }