mirror of https://gitee.com/mirrors/Spring-Cloud-Alibaba.git synced 2021-06-26 13:25:11 +08:00

Merge remote-tracking branch 'upstream/master'

fangjian0423 2019-05-21 11:16:48 +08:00
commit ac41b57263
30 changed files with 214 additions and 128 deletions

View File

@@ -19,7 +19,7 @@
<properties>
<sentinel.version>1.6.0</sentinel.version>
<oss.version>3.1.0</oss.version>
<seata.version>0.5.0</seata.version>
<seata.version>0.5.1</seata.version>
<nacos.client.version>1.0.0</nacos.client.version>
<nacos.config.version>0.8.0</nacos.config.version>
<acm.version>1.0.9</acm.version>
@@ -31,7 +31,7 @@
<schedulerX.client.version>2.1.6</schedulerX.client.version>
<dubbo.version>2.7.1</dubbo.version>
<dubbo-spring-boot.version>2.7.1</dubbo-spring-boot.version>
<dubbo-registry-nacos.version>0.0.2</dubbo-registry-nacos.version>
<dubbo-registry-nacos.version>2.7.1</dubbo-registry-nacos.version>
<aliyun.java.sdk.dysmsapi>1.1.0</aliyun.java.sdk.dysmsapi>
<aliyun.sdk.mns>1.1.8.6</aliyun.sdk.mns>
<aliyun.java.sdk.dyvmsapi>1.1.1</aliyun.java.sdk.dyvmsapi>
@@ -243,7 +243,7 @@
<!-- Dubbo Nacos registry dependency -->
<dependency>
<groupId>com.alibaba</groupId>
<groupId>org.apache.dubbo</groupId>
<artifactId>dubbo-registry-nacos</artifactId>
<version>${dubbo-registry-nacos.version}</version>
</dependency>

View File

@@ -60,6 +60,7 @@ public class DefaultHttpRequest implements HttpRequest {
return HttpMethod.resolve(getMethodValue());
}
@Override
public String getMethodValue() {
return method;
}

View File

@@ -73,6 +73,7 @@ public class MutableHttpServerRequest implements HttpServerRequest {
}
// Override method since Spring Framework 5.0
@Override
public String getMethodValue() {
return httpMethod.name();
}

View File

@@ -44,8 +44,12 @@ public class DubboRestServiceMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof DubboRestServiceMetadata)) return false;
if (this == o) {
return true;
}
if (!(o instanceof DubboRestServiceMetadata)) {
return false;
}
DubboRestServiceMetadata that = (DubboRestServiceMetadata) o;
return Objects.equals(serviceRestMetadata, that.serviceRestMetadata) &&
Objects.equals(restMethodMetadata, that.restMethodMetadata);

View File

@@ -77,8 +77,12 @@ public class DubboTransportedMethodMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof DubboTransportedMethodMetadata)) return false;
if (this == o) {
return true;
}
if (!(o instanceof DubboTransportedMethodMetadata)) {
return false;
}
DubboTransportedMethodMetadata that = (DubboTransportedMethodMetadata) o;
return Objects.equals(methodMetadata, that.methodMetadata) &&
Objects.equals(attributes, that.attributes);

View File

@@ -110,8 +110,12 @@ public class MethodMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MethodMetadata that = (MethodMetadata) o;
return Objects.equals(name, that.name) &&
Objects.equals(returnType, that.returnType) &&

View File

@@ -61,8 +61,12 @@ public class MethodParameterMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
MethodParameterMetadata that = (MethodParameterMetadata) o;
return index == that.index &&
Objects.equals(name, that.name) &&

View File

@@ -235,8 +235,12 @@ public class RequestMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof RequestMetadata)) return false;
if (this == o) {
return true;
}
if (!(o instanceof RequestMetadata)) {
return false;
}
RequestMetadata that = (RequestMetadata) o;
return Objects.equals(method, that.method) &&
Objects.equals(path, that.path) &&

View File

@@ -183,8 +183,12 @@ public class RestMethodMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof RestMethodMetadata)) return false;
if (this == o) {
return true;
}
if (!(o instanceof RestMethodMetadata)) {
return false;
}
RestMethodMetadata that = (RestMethodMetadata) o;
return queryMapEncoded == that.queryMapEncoded &&
Objects.equals(method, that.method) &&

View File

@@ -52,8 +52,12 @@ public class ServiceRestMetadata {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ServiceRestMetadata)) return false;
if (this == o) {
return true;
}
if (!(o instanceof ServiceRestMetadata)) {
return false;
}
ServiceRestMetadata that = (ServiceRestMetadata) o;
return Objects.equals(url, that.url) &&
Objects.equals(meta, that.meta);

View File

@@ -64,7 +64,7 @@ public abstract class AbstractSpringCloudRegistry extends FailbackRegistry {
protected static final String DUBBO_METADATA_SERVICE_CLASS_NAME = DubboMetadataService.class.getName();
private static final Set<String> schedulerTasks = new HashSet<>();
private static final Set<String> SCHEDULER_TASKS = new HashSet<>();
protected final Logger logger = LoggerFactory.getLogger(getClass());
@@ -164,7 +164,7 @@ public abstract class AbstractSpringCloudRegistry extends FailbackRegistry {
private void submitSchedulerTaskIfAbsent(URL url, NotifyListener listener) {
String taskId = url.toIdentityString();
if (schedulerTasks.add(taskId)) {
if (SCHEDULER_TASKS.add(taskId)) {
schedule(() -> doSubscribeDubboServiceURLs(url, listener));
}
}

View File

@@ -110,6 +110,7 @@ public class DubboGenericServiceFactory {
dataBinder.registerCustomEditor(String.class, "listener", new StringTrimmerEditor(true));
dataBinder.registerCustomEditor(Map.class, "parameters", new PropertyEditorSupport() {
@Override
public void setAsText(String text) throws java.lang.IllegalArgumentException {
// Trim all whitespace
String content = StringUtils.trimAllWhitespace(text);

View File

@@ -20,15 +20,35 @@ transport {
worker-thread-size = 8
}
}
## transaction log store
store {
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
}
service {
#vgroup->rgroup
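The expanded `store` block above makes the session store pluggable: `mode = "file"` keeps the original local `sessionStore` files, and the new `db` block is where a database-backed store would be configured. As a minimal sketch (not part of this commit), a db-mode configuration might look like the following; the driver class, JDBC URL, and credentials are assumed placeholder values for a local MySQL instance:
```
store {
  ## switch from the default file store to the database store
  mode = "db"

  db {
    # assumed values for a local MySQL instance; adjust to your environment
    driver_class = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://127.0.0.1:3306/seata"
    user = "seata"
    password = "seata"
  }
}
```
The example projects keep `mode = "file"`, so they run without a database.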

View File

@@ -1,5 +1,5 @@
registry {
# file 、nacos 、eureka、redis、zk、consul
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "file"
nacos {
@@ -26,6 +26,19 @@ registry {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
@@ -41,7 +54,7 @@ config {
cluster = "default"
}
apollo {
app.id = "fescar-server"
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
@@ -52,4 +65,4 @@ config {
file {
name = "file.conf"
}
}
}
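Switching to one of the newly listed registries is only a matter of pointing the `type` field at the corresponding block. A minimal sketch that selects the etcd3 registry added above, reusing the default address from that block (this commit itself leaves `type = "file"` for the samples):
```
registry {
  # file, nacos, eureka, redis, zk, consul, etcd3, sofa
  type = "etcd3"

  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
}
```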

View File

@@ -20,15 +20,35 @@ transport {
worker-thread-size = 8
}
}
## transaction log store
store {
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
}
service {
#vgroup->rgroup

View File

@@ -1,5 +1,5 @@
registry {
# file 、nacos 、eureka、redis、zk、consul
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "file"
nacos {
@@ -26,6 +26,19 @@ registry {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
@@ -41,7 +54,7 @@ config {
cluster = "default"
}
apollo {
app.id = "fescar-server"
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
@@ -52,4 +65,4 @@ config {
file {
name = "file.conf"
}
}
}

View File

@@ -101,7 +101,7 @@ CREATE TABLE `account_tbl` (
Go into the bin directory of the extracted archive and run the following command to start the server:
```$shell
sh seata-server.sh $LISTEN_PORT $MODE
sh seata-server.sh $LISTEN_PORT $MODE(file or db)
```
In this example, the following command is used to start the Seata Server:
@@ -110,7 +110,7 @@ sh seata-server.sh $LISTEN_PORT $MODE
sh seata-server.sh 8091 file
```
**Note** If you change the endpoint and the registry center uses the default file type, remember to update the grouplist value in the `file.conf` file of each example project.
**Note** If you change the endpoint and the registry center uses the default file type, remember to update the grouplist value in the `file.conf` file of each example project (when registry.type or config.type in registry.conf is file, the file name in the nested file node is read; if the type is not file, the configuration is read directly from the registry/configuration center of that type). We recommend using Nacos as the configuration and registry center.
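With the default file registry, the server endpoint the examples connect to comes from the `grouplist` entry under the `service` block of each project's `file.conf`. A minimal sketch of that block, assuming the server was started on `192.168.1.100:8091` instead of the default endpoint (the transaction group name `my_test_tx_group` is an illustrative assumption, not taken from this diff):
```
service {
  #vgroup->rgroup
  vgroup_mapping.my_test_tx_group = "default"
  # point grouplist at the endpoint your seata-server actually listens on
  default.grouplist = "192.168.1.100:8091"
}
```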
## Run the Example

View File

@@ -20,15 +20,35 @@ transport {
worker-thread-size = 8
}
}
## transaction log store
store {
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
## store mode: file、db
mode = "file"
## file store
file {
dir = "sessionStore"
# branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
max-branch-session-size = 16384
# globe session size , if exceeded throws exceptions
max-global-session-size = 512
# file buffer size , if exceeded allocate new buffer
file-write-buffer-cache-size = 16384
# when recover batch read size
session.reload.read_size = 100
# async, sync
flush-disk-mode = async
}
## database store
db {
driver_class = ""
url = ""
user = ""
password = ""
}
}
service {
#vgroup->rgroup

View File

@@ -1,5 +1,5 @@
registry {
# file 、nacos 、eureka、redis、zk、consul
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
type = "file"
nacos {
@@ -26,6 +26,19 @@ registry {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
@@ -41,7 +54,7 @@ config {
cluster = "default"
}
apollo {
app.id = "fescar-server"
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
}
zk {
@@ -52,4 +65,4 @@ config {
file {
name = "file.conf"
}
}
}

View File

@@ -76,6 +76,7 @@ public class DubboGatewayServlet extends HttpServletBean {
return serviceName;
}
@Override
public void service(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
String serviceName = resolveServiceName(request);

View File

@@ -28,25 +28,25 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public class NacosPropertySourceRepository {
private final static ConcurrentHashMap<String, NacosPropertySource> nacosPropertySourceRepository = new ConcurrentHashMap<>();
private final static ConcurrentHashMap<String, NacosPropertySource> NACOS_PROPERTY_SOURCE_REPOSITORY = new ConcurrentHashMap<>();
/**
* @return all nacos properties from application context
*/
public static List<NacosPropertySource> getAll() {
List<NacosPropertySource> result = new ArrayList<>();
result.addAll(nacosPropertySourceRepository.values());
result.addAll(NACOS_PROPERTY_SOURCE_REPOSITORY.values());
return result;
}
public static void collectNacosPropertySources(
NacosPropertySource nacosPropertySource) {
nacosPropertySourceRepository.putIfAbsent(nacosPropertySource.getDataId(),
NACOS_PROPERTY_SOURCE_REPOSITORY.putIfAbsent(nacosPropertySource.getDataId(),
nacosPropertySource);
}
public static NacosPropertySource getNacosPropertySource(String dataId) {
return nacosPropertySourceRepository.get(dataId);
return NACOS_PROPERTY_SOURCE_REPOSITORY.get(dataId);
}
}

View File

@@ -118,57 +118,10 @@ public class NacosWatch implements ApplicationEventPublisherAware, SmartLifecycle
}
public void nacosServicesWatch() {
try {
boolean changed = false;
NamingService namingService = properties.namingServiceInstance();
// nacos doesn't support watch now , publish an event every 30 seconds.
this.publisher.publishEvent(
new HeartbeatEvent(this, nacosWatchIndex.getAndIncrement()));
ListView<String> listView = properties.namingServiceInstance()
.getServicesOfServer(1, Integer.MAX_VALUE);
List<String> serviceList = listView.getData();
// if there are new services found, publish event
Set<String> currentServices = new HashSet<>(serviceList);
currentServices.removeAll(cacheServices);
if (currentServices.size() > 0) {
changed = true;
}
// if some services disappear, publish event
if (cacheServices.removeAll(new HashSet<>(serviceList))
&& cacheServices.size() > 0) {
changed = true;
for (String serviceName : cacheServices) {
namingService.unsubscribe(serviceName,
subscribeListeners.get(serviceName));
subscribeListeners.remove(serviceName);
}
}
cacheServices = new HashSet<>(serviceList);
// subscribe services's node change, publish event if nodes changed
for (String serviceName : cacheServices) {
if (!subscribeListeners.containsKey(serviceName)) {
EventListener eventListener = event -> NacosWatch.this.publisher
.publishEvent(new HeartbeatEvent(NacosWatch.this,
nacosWatchIndex.getAndIncrement()));
subscribeListeners.put(serviceName, eventListener);
namingService.subscribe(serviceName, eventListener);
}
}
if (changed) {
this.publisher.publishEvent(
new HeartbeatEvent(this, nacosWatchIndex.getAndIncrement()));
}
}
catch (Exception e) {
log.error("Error watching Nacos Service change", e);
}
}
}

View File

@@ -25,7 +25,7 @@ import feign.MethodMetadata;
/**
*
* Using static field {@link SentinelContractHolder#metadataMap} to hold
* Using static field {@link SentinelContractHolder#METADATA_MAP} to hold
* {@link MethodMetadata} data
*
* @author <a href="mailto:fangjian0423@gmail.com">Jim</a>
@@ -38,7 +38,7 @@ public class SentinelContractHolder implements Contract {
* map key is constructed by ClassFullName + configKey. configKey is constructed by
* {@link feign.Feign#configKey}
*/
public final static Map<String, MethodMetadata> metadataMap = new HashMap();
public final static Map<String, MethodMetadata> METADATA_MAP = new HashMap<>();
public SentinelContractHolder(Contract delegate) {
this.delegate = delegate;
@@ -47,7 +47,7 @@ public class SentinelContractHolder implements Contract {
@Override
public List<MethodMetadata> parseAndValidatateMetadata(Class<?> targetType) {
List<MethodMetadata> metadatas = delegate.parseAndValidatateMetadata(targetType);
metadatas.forEach(metadata -> metadataMap
metadatas.forEach(metadata -> METADATA_MAP
.put(targetType.getName() + metadata.configKey(), metadata));
return metadatas;
}

View File

@@ -90,7 +90,7 @@ public class SentinelInvocationHandler implements InvocationHandler {
// only handle by HardCodedTarget
if (target instanceof Target.HardCodedTarget) {
Target.HardCodedTarget hardCodedTarget = (Target.HardCodedTarget) target;
MethodMetadata methodMetadata = SentinelContractHolder.metadataMap
MethodMetadata methodMetadata = SentinelContractHolder.METADATA_MAP
.get(hardCodedTarget.type().getName()
+ Feign.configKey(hardCodedTarget.type(), method));
// resource default is HttpMethod:protocol://url

View File

@@ -12,12 +12,12 @@ public class SmsProperties {
/**
* Product name.
*/
public static final String smsProduct = "Dysmsapi";
public static final String SMS_PRODUCT = "Dysmsapi";
/**
* Product domain.
*/
public static final String smsDomain = "dysmsapi.aliyuncs.com";
public static final String SMS_DOMAIN = "dysmsapi.aliyuncs.com";
/**
* Report queue name.

View File

@@ -60,7 +60,7 @@ public class SmsInitializerEventListener
// Initialize the acsClient; regionalization is not supported yet
try {
DefaultProfile.addEndpoint("cn-hangzhou", "cn-hangzhou",
SmsProperties.smsProduct, SmsProperties.smsDomain);
SmsProperties.SMS_PRODUCT, SmsProperties.SMS_DOMAIN);
Collection<MessageListener> messageListeners = event.getApplicationContext()
.getBeansOfType(MessageListener.class).values();
if (messageListeners.isEmpty()) {

View File

@@ -85,8 +85,8 @@ public class DefaultAlicomMessagePuller {
this.executorService = executorService;
}
protected static final Map<String, Object> sLockObjMap = new HashMap<String, Object>();
protected static Map<String, Boolean> sPollingMap = new ConcurrentHashMap<String, Boolean>();
protected static final Map<String, Object> S_LOCK_OBJ_MAP = new HashMap<>();
protected static Map<String, Boolean> sPollingMap = new ConcurrentHashMap<>();
protected Object lockObj;
public boolean setPolling(String queueName) {
@@ -309,11 +309,11 @@ public class DefaultAlicomMessagePuller {
task.messageType = messageType;
task.queueName = queueName;
synchronized (sLockObjMap) {
lockObj = sLockObjMap.get(queueName);
synchronized (S_LOCK_OBJ_MAP) {
lockObj = S_LOCK_OBJ_MAP.get(queueName);
if (lockObj == null) {
lockObj = new Object();
sLockObjMap.put(queueName, lockObj);
S_LOCK_OBJ_MAP.put(queueName, lockObj);
}
}
@@ -355,11 +355,11 @@ public class DefaultAlicomMessagePuller {
task.messageType = messageType;
task.queueName = queueName;
synchronized (sLockObjMap) {
lockObj = sLockObjMap.get(queueName);
synchronized (S_LOCK_OBJ_MAP) {
lockObj = S_LOCK_OBJ_MAP.get(queueName);
if (lockObj == null) {
lockObj = new Object();
sLockObjMap.put(queueName, lockObj);
S_LOCK_OBJ_MAP.put(queueName, lockObj);
}
}
@@ -402,11 +402,11 @@ public class DefaultAlicomMessagePuller {
task.messageType = messageType;
task.queueName = queueName;
synchronized (sLockObjMap) {
lockObj = sLockObjMap.get(queueName);
synchronized (S_LOCK_OBJ_MAP) {
lockObj = S_LOCK_OBJ_MAP.get(queueName);
if (lockObj == null) {
lockObj = new Object();
sLockObjMap.put(queueName, lockObj);
S_LOCK_OBJ_MAP.put(queueName, lockObj);
}
}

View File

@@ -76,6 +76,7 @@ public class QueryTokenForMnsQueueRequest
}
@Override
public Class<QueryTokenForMnsQueueResponse> getResponseClass() {
return QueryTokenForMnsQueueResponse.class;
}

View File

@@ -60,6 +60,7 @@ public class QueryTokenForMnsQueueResponse extends AcsResponse {
this.messageTokenDTO = messageTokenDTO;
}
@Override
public QueryTokenForMnsQueueResponse getInstance(UnmarshallerContext context) {
return QueryTokenForMnsQueueResponseUnmarshaller.unmarshall(this, context);
}

View File

@@ -50,7 +50,7 @@ public class TokenGetterForAlicom {
private String domainForPop;
private IAcsClient iAcsClient;
private Long ownerId;
private final static String productName = "Dybaseapi";
private final static String PRODUCT_NAME = "Dybaseapi";
private long bufferTime = 1000 * 60 * 2;// re-fetch the token when it expires in less than 2 minutes, to tolerate server clock skew
private final Object lock = new Object();
private ConcurrentMap<String, TokenForAlicom> tokenMap = new ConcurrentHashMap<String, TokenForAlicom>();
@@ -68,7 +68,7 @@ public class TokenGetterForAlicom {
}
private void init() throws ClientException {
DefaultProfile.addEndpoint(endpointNameForPop, regionIdForPop, productName,
DefaultProfile.addEndpoint(endpointNameForPop, regionIdForPop, PRODUCT_NAME,
domainForPop);
IClientProfile profile = DefaultProfile.getProfile(regionIdForPop, accessKeyId,
accessKeySecret);