@@ -3,7 +3,7 @@ port: "8091" | |||
max_rollback_retry_timeout: -1 | |||
max_commit_retry_timeout: -1 | |||
timeout_retry_period: "1s" | |||
rollbacking_retry_period: "1s" | |||
rolling_back_retry_period: "1s" | |||
committing_retry_period: "1s" | |||
async_committing_retry_period: "10s" | |||
log_delete_period: "24h" | |||
@@ -1,6 +1,7 @@ | |||
package main | |||
import ( | |||
"fmt" | |||
"os" | |||
"strconv" | |||
) | |||
@@ -42,19 +43,26 @@ func main() { | |||
}, | |||
Action: func(c *cli.Context) error { | |||
configPath := c.String("config") | |||
serverNode := c.Int("serverNode") | |||
ip, _ := gxnet.GetLocalIP() | |||
serverNode := c.Int64("serverNode") | |||
conf, err := config.InitConf(configPath) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
config.InitConf(configPath) | |||
conf := config.GetServerConfig() | |||
port, _ := strconv.Atoi(conf.Port) | |||
common.GetXID().Init(ip, port) | |||
ip, _ := gxnet.GetLocalIP() | |||
port, err := strconv.Atoi(conf.Port) | |||
if err != nil { | |||
log.Fatal(err) | |||
} | |||
common.Init(ip, port) | |||
uuid.Init(serverNode) | |||
lock.Init() | |||
holder.Init() | |||
srv := server.NewServer() | |||
srv.Start(conf.Host + ":" + conf.Port) | |||
srv.Start(fmt.Sprintf(":%s", conf.Port)) | |||
return nil | |||
}, | |||
}, | |||
@@ -16,6 +16,7 @@ require ( | |||
github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect | |||
github.com/modern-go/reflect2 v1.0.1 // indirect | |||
github.com/nacos-group/nacos-sdk-go v1.0.8 | |||
github.com/natefinch/lumberjack v2.0.0+incompatible | |||
github.com/opentrx/seata-golang/v2 v2.0.0-rc1 // indirect | |||
github.com/patrickmn/go-cache v2.1.0+incompatible | |||
github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 // indirect | |||
@@ -25,7 +26,6 @@ require ( | |||
github.com/pkg/errors v0.9.1 | |||
github.com/prometheus/client_golang v1.9.0 | |||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a | |||
github.com/shima-park/agollo v1.2.6 | |||
github.com/stretchr/testify v1.7.0 | |||
github.com/urfave/cli/v2 v2.3.0 | |||
go.uber.org/atomic v1.7.0 | |||
@@ -1,6 +0,0 @@ | |||
package constant | |||
const ( | |||
// key of the seata client config file path | |||
CONF_CLIENT_FILE_PATH = "CONF_CLIENT_FILE_PATH" | |||
) |
@@ -1,12 +0,0 @@ | |||
package constant | |||
const ( | |||
NACOS_KEY = "nacos" | |||
) | |||
const ( | |||
FILE_KEY = "file" | |||
) | |||
const ( | |||
NACOS_DEFAULT_GROUP = "SEATA_GROUP" | |||
NACOS_DEFAULT_DATA_ID = "seata" | |||
) |
@@ -6,24 +6,18 @@ import ( | |||
"strings" | |||
) | |||
type XID struct { | |||
IpAddress string | |||
Port int | |||
} | |||
var xID = &XID{} | |||
func GetXID() *XID { | |||
return xID | |||
} | |||
var ( | |||
IPAddress string | |||
Port int | |||
) | |||
func (xid *XID) Init(ipAddress string, port int) { | |||
xid.IpAddress = ipAddress | |||
xid.Port = port | |||
func Init(ipAddress string, port int) { | |||
IPAddress = ipAddress | |||
Port = port | |||
} | |||
func (xid *XID) GenerateXID(tranID int64) string { | |||
return fmt.Sprintf("%s:%d:%d", xid.IpAddress, xid.Port, tranID) | |||
func GenerateXID(tranID int64) string { | |||
return fmt.Sprintf("%s:%d:%d", IPAddress, Port, tranID) | |||
} | |||
func GetTransactionID(xid string) int64 { | |||
@@ -37,4 +31,4 @@ func GetTransactionID(xid string) int64 { | |||
} | |||
tranID, _ := strconv.ParseInt(xid[idx+1:], 10, 64) | |||
return tranID | |||
} | |||
} |
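The XID helper moves from a singleton struct (`common.GetXID().Init(...)`) to package-level state with `Init`, `GenerateXID` and `GetTransactionID`. A minimal usage sketch, assuming the `common` package lives at `pkg/base/common` as the neighbouring `constant`/`extension` import paths suggest:

```go
package main

import (
	"fmt"

	"github.com/transaction-wg/seata-golang/pkg/base/common"
)

func main() {
	// Register the coordinator address once at startup (done in main.go above).
	common.Init("127.0.0.1", 8091)

	// An XID has the form "<ip>:<port>:<transactionID>".
	xid := common.GenerateXID(2000042)
	fmt.Println(xid) // 127.0.0.1:8091:2000042

	// GetTransactionID parses the numeric suffix back out of an XID.
	fmt.Println(common.GetTransactionID(xid)) // 2000042
}
```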
@@ -7,10 +7,10 @@ type ConfigCenterConfig struct { | |||
type NacosConfigCenter struct { | |||
ServerAddr string `yaml:"server_addr" json:"server_addr,omitempty"` | |||
Group string `default:"SEATA_GROUP" yaml:"group" json:"group,omitempty"` | |||
Group string `default:"SEATA_GROUP" yaml:"group" json:"group,omitempty"` | |||
Namespace string `yaml:"namespace" json:"namespace,omitempty"` | |||
Cluster string `yaml:"cluster" json:"cluster,omitempty"` | |||
UserName string `yaml:"username" json:"username,omitempty"` | |||
Password string `yaml:"password" json:"password,omitempty"` | |||
DataId string `default:"seata" yaml:"data_id" json:"data_id,omitempty"` | |||
DataId string `default:"seata" yaml:"data_id" json:"data_id,omitempty"` | |||
} |
@@ -0,0 +1,19 @@ | |||
package config | |||
import ( | |||
"time" | |||
) | |||
type GettySessionParam struct { | |||
CompressEncoding bool `default:"false" yaml:"compress_encoding" json:"compress_encoding,omitempty"` | |||
TcpNoDelay bool `default:"true" yaml:"tcp_no_delay" json:"tcp_no_delay,omitempty"` | |||
TcpKeepAlive bool `default:"true" yaml:"tcp_keep_alive" json:"tcp_keep_alive,omitempty"` | |||
KeepAlivePeriod time.Duration `default:"180s" yaml:"keep_alive_period" json:"keep_alive_period,omitempty"` | |||
TcpRBufSize int `default:"262144" yaml:"tcp_r_buf_size" json:"tcp_r_buf_size,omitempty"` | |||
TcpWBufSize int `default:"65536" yaml:"tcp_w_buf_size" json:"tcp_w_buf_size,omitempty"` | |||
TcpReadTimeout time.Duration `default:"1s" yaml:"tcp_read_timeout" json:"tcp_read_timeout,omitempty"` | |||
TcpWriteTimeout time.Duration `default:"5s" yaml:"tcp_write_timeout" json:"tcp_write_timeout,omitempty"` | |||
WaitTimeout time.Duration `default:"7s" yaml:"wait_timeout" json:"wait_timeout,omitempty"` | |||
MaxMsgLen int `default:"4096" yaml:"max_msg_len" json:"max_msg_len,omitempty"` | |||
SessionName string `default:"rpc" yaml:"session_name" json:"session_name,omitempty"` | |||
} |
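The getty session parameters now use `time.Duration` fields with `default` tags instead of string fields plus a `CheckValidity` pass. A minimal sketch of how such a struct might be loaded, reusing the `parser.NewParser("seata")` / `Parse` calls that appear later in this diff; that the parser honours the `default` tags and duration strings such as "180s" is an assumption here:

```go
package config

import (
	"io/ioutil"

	"github.com/transaction-wg/seata-golang/pkg/util/parser"
)

// loadSessionParam is a hypothetical helper illustrating how the duration-typed
// fields above could be populated. Assumption: the seata parser applies the
// `default` struct tags and parses duration strings such as "180s".
func loadSessionParam(yamlPath string) (*GettySessionParam, error) {
	in, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		return nil, err
	}
	p := parser.NewParser("seata") // same env prefix used by the client/server config in this diff
	param := new(GettySessionParam)
	if err := p.Parse(in, param); err != nil {
		return nil, err
	}
	return param, nil
}
```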
@@ -1,5 +1,7 @@ | |||
package config | |||
var config *RegistryConfig | |||
type RegistryConfig struct { | |||
Mode string `yaml:"type" json:"type,omitempty"` //type | |||
NacosConfig NacosConfig `yaml:"nacos" json:"nacos,omitempty"` | |||
@@ -14,3 +16,11 @@ type NacosConfig struct { | |||
UserName string `yaml:"username" json:"username,omitempty"` | |||
Password string `yaml:"password" json:"password,omitempty"` | |||
} | |||
func InitRegistryConfig(registryConfig *RegistryConfig) { | |||
config = registryConfig | |||
} | |||
func GetRegistryConfig() *RegistryConfig { | |||
return config | |||
} |
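The registry configuration is now published through a package-level holder, so registry implementations no longer need to know whether they run inside the client or the TC server. A minimal sketch under that assumption:

```go
package main

import (
	"fmt"

	baseConfig "github.com/transaction-wg/seata-golang/pkg/base/config"
)

func main() {
	// InitConf (client or server) publishes the parsed registry section once...
	baseConfig.InitRegistryConfig(&baseConfig.RegistryConfig{Mode: "file"})

	// ...and registry implementations read it back from the shared holder.
	fmt.Println(baseConfig.GetRegistryConfig().Mode) // file
}
```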
@@ -2,7 +2,7 @@ package config_center | |||
import "github.com/transaction-wg/seata-golang/pkg/base/config" | |||
func AddLisenter(cc DynamicConfigurationFactory, conf *config.ConfigCenterConfig, listener ConfigurationListener) { | |||
func AddListener(cc DynamicConfigurationFactory, conf *config.ConfigCenterConfig, listener ConfigurationListener) { | |||
if conf.Mode == "" { | |||
return | |||
} | |||
@@ -10,8 +10,8 @@ func AddLisenter(cc DynamicConfigurationFactory, conf *config.ConfigCenterConfig | |||
} | |||
func LoadConfigCenterConfig(cc DynamicConfigurationFactory, conf *config.ConfigCenterConfig, listener ConfigurationListener) string { | |||
confStr := cc.GetConfig(conf) | |||
//监听远程配置情况,发生变更进行配置修改 | |||
AddLisenter(cc, conf, listener) | |||
return confStr | |||
remoteConfig := cc.GetConfig(conf) | |||
// listen for remote config changes and update the config accordingly | |||
AddListener(cc, conf, listener) | |||
return remoteConfig | |||
} |
@@ -5,5 +5,4 @@ import "github.com/transaction-wg/seata-golang/pkg/base/config" | |||
type DynamicConfigurationFactory interface { | |||
GetConfig(conf *config.ConfigCenterConfig) string //returns the configuration content | |||
AddListener(conf *config.ConfigCenterConfig, listener ConfigurationListener) //adds a configuration listener | |||
} |
@@ -13,21 +13,21 @@ import ( | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
baseConfig "github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config_center" | |||
"github.com/transaction-wg/seata-golang/pkg/base/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
) | |||
func init() { | |||
extension.SetConfigCenter(constant.NACOS_KEY, newNacosConfigCenterFactory) | |||
extension.SetConfigCenter(constant.NacosKey, newNacosConfigCenterFactory) | |||
} | |||
type nacosConfigCenter struct { | |||
client config_client.IConfigClient | |||
} | |||
func (nc *nacosConfigCenter) AddListener(conf *baseConfig.ConfigCenterConfig, listener config_center.ConfigurationListener) { | |||
func (nc *nacosConfigCenter) AddListener(conf *config.ConfigCenterConfig, listener config_center.ConfigurationListener) { | |||
dataId := getDataId(conf) | |||
group := getGroup(conf) | |||
_ = nc.client.ListenConfig(vo.ConfigParam{ | |||
@@ -39,23 +39,23 @@ func (nc *nacosConfigCenter) AddListener(conf *baseConfig.ConfigCenterConfig, li | |||
}) | |||
} | |||
func getGroup(conf *baseConfig.ConfigCenterConfig) string { | |||
func getGroup(conf *config.ConfigCenterConfig) string { | |||
group := conf.NacosConfig.Group | |||
if group == "" { | |||
group = constant.NACOS_DEFAULT_GROUP | |||
group = constant.NacosDefaultGroup | |||
} | |||
return group | |||
} | |||
func getDataId(conf *baseConfig.ConfigCenterConfig) string { | |||
func getDataId(conf *config.ConfigCenterConfig) string { | |||
dataId := conf.NacosConfig.DataId | |||
if dataId == "" { | |||
dataId = constant.NACOS_DEFAULT_DATA_ID | |||
dataId = constant.NacosDefaultDataID | |||
} | |||
return dataId | |||
} | |||
func (nc *nacosConfigCenter) GetConfig(conf *baseConfig.ConfigCenterConfig) string { | |||
func (nc *nacosConfigCenter) GetConfig(conf *config.ConfigCenterConfig) string { | |||
dataId := getDataId(conf) | |||
group := getGroup(conf) | |||
config, _ := nc.client.GetConfig(vo.ConfigParam{ | |||
@@ -64,7 +64,7 @@ func (nc *nacosConfigCenter) GetConfig(conf *baseConfig.ConfigCenterConfig) stri | |||
return config | |||
} | |||
func newNacosConfigCenterFactory(conf *baseConfig.ConfigCenterConfig) (factory config_center.DynamicConfigurationFactory, e error) { | |||
func newNacosConfigCenterFactory(conf *config.ConfigCenterConfig) (factory config_center.DynamicConfigurationFactory, e error) { | |||
nacosConfig, err := getNacosConfig(conf) | |||
if err != nil { | |||
return &nacosConfigCenter{}, err | |||
@@ -80,7 +80,7 @@ func newNacosConfigCenterFactory(conf *baseConfig.ConfigCenterConfig) (factory c | |||
} | |||
//get the Nacos config info | |||
func getNacosConfig(conf *baseConfig.ConfigCenterConfig) (map[string]interface{}, error) { | |||
func getNacosConfig(conf *config.ConfigCenterConfig) (map[string]interface{}, error) { | |||
configMap := make(map[string]interface{}, 2) | |||
addr := conf.NacosConfig.ServerAddr | |||
@@ -0,0 +1,5 @@ | |||
package constant | |||
const ( | |||
ClientConfigFilePath = "ConfClientFilePath" | |||
) |
@@ -0,0 +1,8 @@ | |||
package constant | |||
const ( | |||
NacosDefaultGroup = "SEATA_GROUP" | |||
NacosDefaultDataID = "seata" | |||
NacosKey = "nacos" | |||
FileKey = "file" | |||
) |
@@ -38,5 +38,4 @@ func GetRegistry(name string) (registry.Registry, error) { | |||
return nil, errors.Errorf("registry for " + name + " is not existing, make sure you have import the package.") | |||
} | |||
return registry() | |||
} |
@@ -1,50 +0,0 @@ | |||
package config | |||
import ( | |||
"time" | |||
) | |||
import ( | |||
"github.com/pkg/errors" | |||
) | |||
type GettySessionParam struct { | |||
CompressEncoding bool `default:"false" yaml:"compress_encoding" json:"compress_encoding,omitempty"` | |||
TcpNoDelay bool `default:"true" yaml:"tcp_no_delay" json:"tcp_no_delay,omitempty"` | |||
TcpKeepAlive bool `default:"true" yaml:"tcp_keep_alive" json:"tcp_keep_alive,omitempty"` | |||
KeepAlivePrd string `default:"180s" yaml:"keep_alive_period" json:"keep_alive_period,omitempty"` | |||
KeepAlivePeriod time.Duration | |||
TcpRBufSize int `default:"262144" yaml:"tcp_r_buf_size" json:"tcp_r_buf_size,omitempty"` | |||
TcpWBufSize int `default:"65536" yaml:"tcp_w_buf_size" json:"tcp_w_buf_size,omitempty"` | |||
TcpReadTmt string `default:"1s" yaml:"tcp_read_timeout" json:"tcp_read_timeout,omitempty"` | |||
TcpReadTimeout time.Duration | |||
TcpWriteTmt string `default:"5s" yaml:"tcp_write_timeout" json:"tcp_write_timeout,omitempty"` | |||
TcpWriteTimeout time.Duration | |||
WaitTmt string `default:"7s" yaml:"wait_timeout" json:"wait_timeout,omitempty"` | |||
WaitTimeout time.Duration | |||
MaxMsgLen int `default:"4096" yaml:"max_msg_len" json:"max_msg_len,omitempty"` | |||
SessionName string `default:"rpc" yaml:"session_name" json:"session_name,omitempty"` | |||
} | |||
// CheckValidity ... | |||
func (c *GettySessionParam) CheckValidity() error { | |||
var err error | |||
if c.KeepAlivePeriod, err = time.ParseDuration(c.KeepAlivePrd); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(KeepAlivePeriod{%#v})", c.KeepAlivePrd) | |||
} | |||
if c.TcpReadTimeout, err = time.ParseDuration(c.TcpReadTmt); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(TcpReadTimeout{%#v})", c.TcpReadTmt) | |||
} | |||
if c.TcpWriteTimeout, err = time.ParseDuration(c.TcpWriteTmt); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(TcpWriteTimeout{%#v})", c.TcpWriteTmt) | |||
} | |||
if c.WaitTimeout, err = time.ParseDuration(c.WaitTmt); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(WaitTimeout{%#v})", c.WaitTmt) | |||
} | |||
return nil | |||
} |
@@ -42,10 +42,10 @@ const ( | |||
GlobalStatusRollbackRetrying | |||
/** | |||
* The Timeout rollbacking. | |||
* The Timeout rollingBack. | |||
*/ | |||
// Rollbacking since timeout | |||
GlobalStatusTimeoutRollbacking | |||
// RollingBack since timeout | |||
GlobalStatusTimeoutRollingBack | |||
/** | |||
* The Timeout rollback retrying. | |||
@@ -116,7 +116,7 @@ func (s GlobalStatus) String() string { | |||
return "Rollbacking" | |||
case GlobalStatusRollbackRetrying: | |||
return "RollbackRetrying" | |||
case GlobalStatusTimeoutRollbacking: | |||
case GlobalStatusTimeoutRollingBack: | |||
return "TimeoutRollbacking" | |||
case GlobalStatusTimeoutRollbackRetrying: | |||
return "TimeoutRollbackRetrying" | |||
@@ -5,22 +5,21 @@ import ( | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/registry" | |||
"github.com/transaction-wg/seata-golang/pkg/client/config" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
) | |||
func init() { | |||
extension.SetRegistry(constant.FILE_KEY, newFileRegistry) | |||
extension.SetRegistry(constant.FileKey, newFileRegistry) | |||
} | |||
type fileRegistry struct { | |||
} | |||
func (nr *fileRegistry) Register(addr *registry.Address) error { | |||
//the file mode does not need to register | |||
log.Info("file register") | |||
return nil | |||
} | |||
@@ -28,10 +27,12 @@ func (nr *fileRegistry) Register(addr *registry.Address) error { | |||
func (nr *fileRegistry) UnRegister(addr *registry.Address) error { | |||
return nil | |||
} | |||
func (nr *fileRegistry) Lookup() ([]string, error) { | |||
addressList := strings.Split(config.GetClientConfig().TransactionServiceGroup, ",") | |||
return addressList, nil | |||
} | |||
func (nr *fileRegistry) Subscribe(notifyListener registry.EventListener) error { | |||
return nil | |||
} | |||
@@ -40,9 +41,7 @@ func (nr *fileRegistry) UnSubscribe(notifyListener registry.EventListener) error | |||
return nil | |||
} | |||
// newNacosRegistry will create new instance | |||
func newFileRegistry() (registry.Registry, error) { | |||
tmpRegistry := &fileRegistry{} | |||
return tmpRegistry, nil | |||
} |
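With the constants renamed, a registry is still resolved through the extension point; the file registry simply splits `transaction_service_group` on commas. A minimal sketch, assuming the client config has already been initialised and the file registry package is blank-imported so its `init()` runs:

```go
package main

import (
	"fmt"

	"github.com/transaction-wg/seata-golang/pkg/base/constant"
	"github.com/transaction-wg/seata-golang/pkg/base/extension"
	// NOTE: the file registry package must also be blank-imported so that its
	// init() registers itself under constant.FileKey; its path is not repeated here.
)

func main() {
	reg, err := extension.GetRegistry(constant.FileKey)
	if err != nil {
		panic(err) // "registry for file is not existing" if the package was not imported
	}
	// For the file mode this is transaction_service_group split on ",".
	addrs, err := reg.Lookup()
	if err != nil {
		panic(err)
	}
	fmt.Println(addrs) // e.g. [127.0.0.1:8091 127.0.0.2:8091]
}
```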
@@ -15,45 +15,45 @@ import ( | |||
"github.com/pkg/errors" | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/registry" | |||
clientConfig "github.com/transaction-wg/seata-golang/pkg/client/config" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/config" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
) | |||
func init() { | |||
extension.SetRegistry(constant.NACOS_KEY, newNacosRegistry) | |||
extension.SetRegistry(constant.NacosKey, newNacosRegistry) | |||
} | |||
type nacosRegistry struct { | |||
namingClient naming_client.INamingClient | |||
registryConfig *config.RegistryConfig | |||
namingClient naming_client.INamingClient | |||
} | |||
type nacosEventListener struct { | |||
} | |||
func (nr *nacosEventListener) OnEvent(service []*registry.Service) error { | |||
data, err := json.Marshal(service) | |||
log.Info("servie info change:" + string(data)) | |||
log.Info("service info change:" + string(data)) | |||
return err | |||
} | |||
func (nr *nacosRegistry) Register(addr *registry.Address) error { | |||
registryConfig := config.GetRegistryConfig() | |||
param := createRegisterParam(registryConfig, addr) | |||
func (nr *nacosRegistry) Register(addr *registry.Address) error { | |||
param := createRegisterParam(nr.registryConfig, addr) | |||
isRegistry, err := nr.namingClient.RegisterInstance(param) | |||
if err != nil { | |||
return err | |||
} | |||
if !isRegistry { | |||
return errors.Errorf("registry [" + registryConfig.NacosConfig.Application + "] to nacos failed") | |||
return errors.Errorf("registry [" + nr.registryConfig.NacosConfig.Application + "] to nacos failed") | |||
} | |||
return nil | |||
} | |||
//build the service registration info | |||
func createRegisterParam(registryConfig config.RegistryConfig, addr *registry.Address) vo.RegisterInstanceParam { | |||
func createRegisterParam(registryConfig *config.RegistryConfig, addr *registry.Address) vo.RegisterInstanceParam { | |||
serviceName := registryConfig.NacosConfig.Application | |||
params := make(map[string]string) | |||
@@ -78,11 +78,10 @@ func (nr *nacosRegistry) UnRegister(addr *registry.Address) error { | |||
//noinspection ALL | |||
func (nr *nacosRegistry) Lookup() ([]string, error) { | |||
registryConfig := clientConfig.GetRegistryConfig() | |||
clusterName := registryConfig.NacosConfig.Cluster | |||
clusterName := nr.registryConfig.NacosConfig.Cluster | |||
instances, err := nr.namingClient.SelectInstances(vo.SelectInstancesParam{ | |||
ServiceName: registryConfig.NacosConfig.Application, | |||
GroupName: registryConfig.NacosConfig.Group, // default value is DEFAULT_GROUP | |||
ServiceName: nr.registryConfig.NacosConfig.Application, | |||
GroupName: nr.registryConfig.NacosConfig.Group, // default value is DEFAULT_GROUP | |||
Clusters: []string{clusterName}, // default value is DEFAULT | |||
HealthyOnly: true, | |||
}) | |||
@@ -97,12 +96,12 @@ func (nr *nacosRegistry) Lookup() ([]string, error) { | |||
nr.Subscribe(&nacosEventListener{}) | |||
return addrs, nil | |||
} | |||
func (nr *nacosRegistry) Subscribe(notifyListener registry.EventListener) error { | |||
registryConfig := clientConfig.GetRegistryConfig() | |||
clusterName := registryConfig.NacosConfig.Cluster | |||
clusterName := nr.registryConfig.NacosConfig.Cluster | |||
err := nr.namingClient.Subscribe(&vo.SubscribeParam{ | |||
ServiceName: registryConfig.NacosConfig.Application, | |||
GroupName: registryConfig.NacosConfig.Group, // default value is DEFAULT_GROUP | |||
ServiceName: nr.registryConfig.NacosConfig.Application, | |||
GroupName: nr.registryConfig.NacosConfig.Group, // default value is DEFAULT_GROUP | |||
Clusters: []string{clusterName}, // default value is DEFAULT | |||
SubscribeCallback: func(services []model.SubscribeService, err error) { | |||
serviceList := make([]*registry.Service, 0, len(services)) | |||
@@ -134,18 +133,17 @@ func newNacosRegistry() (registry.Registry, error) { | |||
if err != nil { | |||
return &nacosRegistry{}, err | |||
} | |||
tmpRegistry := &nacosRegistry{ | |||
namingClient: client, | |||
registry := &nacosRegistry{ | |||
registryConfig: config.GetRegistryConfig(), | |||
namingClient: client, | |||
} | |||
return tmpRegistry, nil | |||
return registry, nil | |||
} | |||
//get the Nacos config info | |||
func getNacosConfig() (map[string]interface{}, error) { | |||
registryConfig := config.GetRegistryConfig() | |||
if registryConfig.Mode == "" { | |||
registryConfig = clientConfig.GetRegistryConfig() | |||
} | |||
configMap := make(map[string]interface{}, 2) | |||
addr := registryConfig.NacosConfig.ServerAddr | |||
@@ -142,7 +142,7 @@ func (resourceManager DataSourceManager) doBranchCommit(request protocal.BranchC | |||
func (resourceManager DataSourceManager) doBranchRollback(request protocal.BranchRollbackRequest) protocal.BranchRollbackResponse { | |||
var resp = protocal.BranchRollbackResponse{} | |||
log.Infof("Branch rollbacking: %s %d %s", request.XID, request.BranchID, request.ResourceID) | |||
log.Infof("Branch rolling back: %s %d %s", request.XID, request.BranchID, request.ResourceID) | |||
status, err := resourceManager.BranchRollback(request.BranchType, request.XID, request.BranchID, request.ResourceID, request.ApplicationData) | |||
if err != nil { | |||
resp.ResultCode = protocal.ResultCodeFailed | |||
@@ -1,27 +0,0 @@ | |||
package config | |||
import ( | |||
"time" | |||
) | |||
import ( | |||
"github.com/pkg/errors" | |||
) | |||
type ATConfig struct { | |||
DSN string `yaml:"dsn" json:"dsn,omitempty"` | |||
ReportRetryCount int `default:"5" yaml:"report_retry_count" json:"report_retry_count,omitempty"` | |||
ReportSuccessEnable bool `default:"false" yaml:"report_success_enable" json:"report_success_enable,omitempty"` | |||
LockRetryItv string `default:"10ms" yaml:"lock_retry_interval" json:"lock_retry_interval,omitempty"` | |||
LockRetryInterval time.Duration | |||
LockRetryTimes int `default:"30" yaml:"lock_retry_times" json:"lock_retry_times,omitempty"` | |||
} | |||
func (c *ATConfig) CheckValidity() error { | |||
var err error | |||
if c.LockRetryInterval, err = time.ParseDuration(c.LockRetryItv); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(LockRetryInterval{%#v})", c.LockRetryItv) | |||
} | |||
return nil | |||
} |
@@ -1,54 +1,49 @@ | |||
package config | |||
import ( | |||
"flag" | |||
"fmt" | |||
"io" | |||
"io/ioutil" | |||
"os" | |||
"path" | |||
"time" | |||
) | |||
import ( | |||
"github.com/imdario/mergo" | |||
"github.com/pkg/errors" | |||
"github.com/shima-park/agollo" | |||
"gopkg.in/yaml.v2" | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/constant" | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
baseConfig "github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config_center" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
"github.com/transaction-wg/seata-golang/pkg/util/parser" | |||
) | |||
var clientConfig *ClientConfig | |||
type ClientConfig struct { | |||
ApplicationID string `yaml:"application_id" json:"application_id,omitempty"` | |||
TransactionServiceGroup string `yaml:"transaction_service_group" json:"transaction_service_group,omitempty"` | |||
EnableClientBatchSendRequest bool `yaml:"enable-rpc_client-batch-send-request" json:"enable-rpc_client-batch-send-request,omitempty"` | |||
SeataVersion string `yaml:"seata_version" json:"seata_version,omitempty"` | |||
GettyConfig GettyConfig `yaml:"getty" json:"getty,omitempty"` | |||
TMConfig TMConfig `yaml:"tm" json:"tm,omitempty"` | |||
ATConfig ATConfig `yaml:"at" json:"at,omitempty"` | |||
RegistryConfig config.RegistryConfig `yaml:"registry_config" json:"registry_config,omitempty"` //registry center config | |||
ConfigCenterConfig baseConfig.ConfigCenterConfig `yaml:"config_center" json:"config_center,omitempty"` //config center config | |||
ApplicationID string `yaml:"application_id" json:"application_id,omitempty"` | |||
TransactionServiceGroup string `yaml:"transaction_service_group" json:"transaction_service_group,omitempty"` | |||
EnableClientBatchSendRequest bool `yaml:"enable-rpc_client-batch-send-request" json:"enable-rpc_client-batch-send-request,omitempty"` | |||
SeataVersion string `yaml:"seata_version" json:"seata_version,omitempty"` | |||
GettyConfig GettyConfig `yaml:"getty" json:"getty,omitempty"` | |||
TMConfig TMConfig `yaml:"tm" json:"tm,omitempty"` | |||
ATConfig struct { | |||
DSN string `yaml:"dsn" json:"dsn,omitempty"` | |||
ReportRetryCount int `default:"5" yaml:"report_retry_count" json:"report_retry_count,omitempty"` | |||
ReportSuccessEnable bool `default:"false" yaml:"report_success_enable" json:"report_success_enable,omitempty"` | |||
LockRetryInterval time.Duration `default:"10ms" yaml:"lock_retry_interval" json:"lock_retry_interval,omitempty"` | |||
LockRetryTimes int `default:"30" yaml:"lock_retry_times" json:"lock_retry_times,omitempty"` | |||
} `yaml:"at" json:"at,omitempty"` | |||
RegistryConfig config.RegistryConfig `yaml:"registry_config" json:"registry_config,omitempty"` //registry center config | |||
ConfigCenterConfig config.ConfigCenterConfig `yaml:"config_center" json:"config_center,omitempty"` //config center config | |||
} | |||
var clientConfig ClientConfig | |||
var ( | |||
confFile string | |||
) | |||
func init() { | |||
fs := flag.NewFlagSet("config", flag.ContinueOnError) | |||
fs.StringVar(&confFile, "conConf", os.Getenv(constant.CONF_CLIENT_FILE_PATH), "default client config path") | |||
} | |||
func GetRegistryConfig() config.RegistryConfig { | |||
return clientConfig.RegistryConfig | |||
} | |||
func GetClientConfig() ClientConfig { | |||
func GetClientConfig() *ClientConfig { | |||
return clientConfig | |||
} | |||
@@ -56,104 +51,88 @@ func GetTMConfig() TMConfig { | |||
return clientConfig.TMConfig | |||
} | |||
func GetATConfig() ATConfig { | |||
return clientConfig.ATConfig | |||
} | |||
func GetDefaultClientConfig(applicationID string) ClientConfig { | |||
return ClientConfig{ | |||
ApplicationID: applicationID, | |||
SeataVersion: "1.1.0", | |||
TransactionServiceGroup: "127.0.0.1:8091", | |||
GettyConfig: GetDefaultGettyConfig(), | |||
TMConfig: GetDefaultTmConfig(), | |||
ApplicationID: applicationID, | |||
TransactionServiceGroup: "127.0.0.1:8091", | |||
EnableClientBatchSendRequest: false, | |||
SeataVersion: "1.1.0", | |||
GettyConfig: GetDefaultGettyConfig(), | |||
TMConfig: GetDefaultTmConfig(), | |||
} | |||
} | |||
func InitConf() error { | |||
var err error | |||
if confFile == "" { | |||
return errors.New(fmt.Sprintf("application configure file name is nil")) | |||
} | |||
if path.Ext(confFile) != ".yml" { | |||
return errors.New(fmt.Sprintf("application configure file name{%v} suffix must be .yml", confFile)) | |||
} | |||
clientConfig = ClientConfig{} | |||
confFileStream, err := ioutil.ReadFile(confFile) | |||
// parse parses an input configuration yaml document into a ClientConfig struct | |||
// | |||
// Environment variables may be used to override configuration parameters other than version, | |||
// following the scheme below: | |||
// Configuration.Abc may be replaced by the value of SEATA_ABC, | |||
// Configuration.Abc.Xyz may be replaced by the value of SEATA_ABC_XYZ, and so forth | |||
func parse(rd io.Reader) (*ClientConfig, error) { | |||
in, err := ioutil.ReadAll(rd) | |||
if err != nil { | |||
return errors.WithMessagef(err, fmt.Sprintf("ioutil.ReadFile(file:%s) = error:%s", confFile, err)) | |||
return nil, err | |||
} | |||
err = yaml.Unmarshal(confFileStream, &clientConfig) | |||
p := parser.NewParser("seata") | |||
config := new(ClientConfig) | |||
err = p.Parse(in, config) | |||
if err != nil { | |||
return errors.WithMessagef(err, fmt.Sprintf("yaml.Unmarshal() = error:%s", err)) | |||
return nil, err | |||
} | |||
//load the remote config | |||
loadConfigCenterConfig(&clientConfig.ConfigCenterConfig) | |||
(&clientConfig).GettyConfig.CheckValidity() | |||
(&clientConfig).ATConfig.CheckValidity() | |||
return nil | |||
return config, nil | |||
} | |||
func loadConfigCenterConfig(conf *baseConfig.ConfigCenterConfig) { | |||
if conf.Mode == "" { | |||
func loadConfigCenterConfig(config *ClientConfig) { | |||
if config.ConfigCenterConfig.Mode == "" { | |||
return | |||
} | |||
cc, err := extension.GetConfigCenter(conf.Mode, conf) | |||
cc, err := extension.GetConfigCenter(config.ConfigCenterConfig.Mode, &config.ConfigCenterConfig) | |||
if err != nil { | |||
log.Error("ConfigCenter can not connect success.Error message is %s", err.Error()) | |||
} | |||
confStr := config_center.LoadConfigCenterConfig(cc, conf, &ClientConfigListener{}) | |||
updateConf(&clientConfig, confStr) | |||
} | |||
func updateConf(config *ClientConfig, confStr string) { | |||
newConf := &ClientConfig{} | |||
confByte := []byte(confStr) | |||
yaml.Unmarshal(confByte, newConf) | |||
//merge the config-center config with the local file config to form the new config | |||
if err := mergo.Merge(config, newConf, mergo.WithOverride); err != nil { | |||
log.Error("merge config fail %s ", err.Error()) | |||
} | |||
remoteConfig := config_center.LoadConfigCenterConfig(cc, &config.ConfigCenterConfig, &ClientConfigListener{}) | |||
updateConf(clientConfig, remoteConfig) | |||
} | |||
type ClientConfigListener struct { | |||
} | |||
func (ClientConfigListener) Process(event *config_center.ConfigChangeEvent) { | |||
//update conf | |||
conf := GetClientConfig() | |||
updateConf(&conf, event.Value.(string)) | |||
updateConf(conf, event.Value.(string)) | |||
} | |||
func InitConfWithDefault(applicationID string) { | |||
clientConfig = GetDefaultClientConfig(applicationID) | |||
(&clientConfig).GettyConfig.CheckValidity() | |||
func updateConf(config *ClientConfig, remoteConfig string) { | |||
newConf := &ClientConfig{} | |||
confByte := []byte(remoteConfig) | |||
yaml.Unmarshal(confByte, newConf) | |||
if err := mergo.Merge(config, newConf, mergo.WithOverride); err != nil { | |||
log.Error("merge config fail %s ", err.Error()) | |||
} | |||
} | |||
func InitApolloConf(serverAddr string, appID string, nameSpace string) error { | |||
a, err := agollo.New(serverAddr, appID, agollo.AutoFetchOnCacheMiss()) | |||
// InitConf init ClientConfig from a file path | |||
func InitConf(confFile string) error { | |||
fp, err := os.Open(confFile) | |||
if err != nil { | |||
return errors.WithMessagef(err, fmt.Sprintf("get etcd error:%s", err)) | |||
log.Fatalf("open configuration file fail, %v", err) | |||
} | |||
var config = a.Get("content", agollo.WithNamespace(nameSpace)) | |||
return initCommonConf([]byte(config)) | |||
} | |||
defer fp.Close() | |||
func initCommonConf(confStream []byte) error { | |||
var err error | |||
err = yaml.Unmarshal(confStream, &clientConfig) | |||
fmt.Println("config", clientConfig) | |||
conf, err := parse(fp) | |||
if err != nil { | |||
return errors.WithMessagef(err, fmt.Sprintf("yaml.Unmarshal() = error:%s", err)) | |||
log.Fatalf("error parsing %s: %v", confFile, err) | |||
} | |||
(&clientConfig).GettyConfig.CheckValidity() | |||
(&clientConfig).ATConfig.CheckValidity() | |||
loadConfigCenterConfig(conf) | |||
config.InitRegistryConfig(&conf.RegistryConfig) | |||
clientConfig = conf | |||
return nil | |||
} |
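Client configuration is now initialised from an explicit path and parsed through the seata parser (which, per the doc comment on parse above, also supports SEATA_* environment overrides). A minimal usage sketch; the conf/client.yml path is only an example:

```go
package main

import (
	"fmt"

	"github.com/transaction-wg/seata-golang/pkg/client/config"
)

func main() {
	// InitConf now takes the file path directly instead of reading a
	// package-level flag/env var.
	if err := config.InitConf("conf/client.yml"); err != nil {
		panic(err)
	}
	conf := config.GetClientConfig() // *ClientConfig after this change
	fmt.Println(conf.ApplicationID, conf.TransactionServiceGroup)
}
```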
@@ -1,17 +1,10 @@ | |||
package config | |||
import ( | |||
config2 "github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"time" | |||
) | |||
import ( | |||
"github.com/pkg/errors" | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/getty/config" | |||
) | |||
// GettyConfig | |||
//Config holds supported types by the multiconfig package | |||
type GettyConfig struct { | |||
@@ -20,40 +13,29 @@ type GettyConfig struct { | |||
ConnectionNum int `default:"16" yaml:"connection_number" json:"connection_number,omitempty"` | |||
// heartbeat | |||
HeartbeatPrd string `default:"15s" yaml:"heartbeat_period" json:"heartbeat_period,omitempty"` | |||
HeartbeatPeriod time.Duration | |||
HeartbeatPeriod time.Duration `default:"15s" yaml:"heartbeat_period" json:"heartbeat_period,omitempty"` | |||
// getty_session tcp parameters | |||
GettySessionParam config.GettySessionParam `required:"true" yaml:"getty_session_param" json:"getty_session_param,omitempty"` | |||
GettySessionParam config2.GettySessionParam `required:"true" yaml:"getty_session_param" json:"getty_session_param,omitempty"` | |||
} | |||
// CheckValidity ... | |||
func (c *GettyConfig) CheckValidity() error { | |||
var err error | |||
if c.HeartbeatPeriod, err = time.ParseDuration(c.HeartbeatPrd); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(HeartbeatPeriod{%#v})", c.HeartbeatPrd) | |||
} | |||
return errors.WithStack(c.GettySessionParam.CheckValidity()) | |||
} | |||
// GetDefaultGettyConfig ... | |||
func GetDefaultGettyConfig() GettyConfig { | |||
return GettyConfig{ | |||
ReconnectInterval: 0, | |||
ConnectionNum: 1, | |||
HeartbeatPrd: "10s", | |||
GettySessionParam: config.GettySessionParam{ | |||
HeartbeatPeriod: 10 * time.Second, | |||
GettySessionParam: config2.GettySessionParam{ | |||
CompressEncoding: false, | |||
TcpNoDelay: true, | |||
TcpKeepAlive: true, | |||
KeepAlivePrd: "180s", | |||
KeepAlivePeriod: 180 * time.Second, | |||
TcpRBufSize: 262144, | |||
TcpWBufSize: 65536, | |||
TcpReadTmt: "1s", | |||
TcpWriteTmt: "5s", | |||
WaitTmt: "1s", | |||
TcpReadTimeout: time.Second, | |||
TcpWriteTimeout: 5 * time.Second, | |||
WaitTimeout: time.Second, | |||
MaxMsgLen: 4096, | |||
SessionName: "rpc_client", | |||
}, | |||
@@ -12,7 +12,7 @@ import ( | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/getty/readwriter" | |||
"github.com/transaction-wg/seata-golang/pkg/client/config" | |||
getty2 "github.com/transaction-wg/seata-golang/pkg/client/rpc_client" | |||
@@ -20,7 +20,7 @@ import ( | |||
) | |||
type RpcClient struct { | |||
conf config.ClientConfig | |||
conf *config.ClientConfig | |||
gettyClients []getty.Client | |||
rpcHandler *getty2.RpcRemoteClient | |||
} | |||
@@ -36,9 +36,7 @@ func NewRpcClient() *RpcClient { | |||
} | |||
func (c *RpcClient) init() { | |||
//addressList := strings.Split(c.conf.TransactionServiceGroup, ",") | |||
//通过注册中心获取服务地址信息 | |||
addressList := getAvailServerList() | |||
addressList := getAvailServerList(c.conf) | |||
if len(addressList) == 0 { | |||
log.Warn("no have valid seata server list") | |||
} | |||
@@ -54,9 +52,8 @@ func (c *RpcClient) init() { | |||
} | |||
} | |||
func getAvailServerList() []string { | |||
registryConfig := config.GetRegistryConfig() | |||
reg, err := extension.GetRegistry(registryConfig.Mode) | |||
func getAvailServerList(config *config.ClientConfig) []string { | |||
reg, err := extension.GetRegistry(config.RegistryConfig.Mode) | |||
if err != nil { | |||
logger.Errorf("Registry can not connect success, program is going to panic.Error message is %s", err.Error()) | |||
panic(err.Error()) | |||
@@ -50,7 +50,7 @@ func GetRpcRemoteClient() *RpcRemoteClient { | |||
} | |||
type RpcRemoteClient struct { | |||
conf config.ClientConfig | |||
conf *config.ClientConfig | |||
idGenerator *atomic.Uint32 | |||
futures *sync.Map | |||
mergeMsgMap *sync.Map | |||
@@ -176,7 +176,7 @@ func (resourceManager TCCResourceManager) doBranchCommit(request protocal.Branch | |||
func (resourceManager TCCResourceManager) doBranchRollback(request protocal.BranchRollbackRequest) protocal.BranchRollbackResponse { | |||
var resp = protocal.BranchRollbackResponse{} | |||
log.Infof("Branch rollbacking: %s %d %s", request.XID, request.BranchID, request.ResourceID) | |||
log.Infof("Branch rolling back: %s %d %s", request.XID, request.BranchID, request.ResourceID) | |||
status, err := resourceManager.BranchRollback(request.BranchType, request.XID, request.BranchID, request.ResourceID, request.ApplicationData) | |||
if err != nil { | |||
resp.ResultCode = protocal.ResultCodeFailed | |||
@@ -1,63 +0,0 @@ | |||
package tm | |||
import ( | |||
"context" | |||
"github.com/transaction-wg/seata-golang/pkg/client" | |||
"testing" | |||
) | |||
import ( | |||
"github.com/stretchr/testify/assert" | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/client/config" | |||
) | |||
type Dog struct { | |||
} | |||
type Cat struct { | |||
} | |||
type ZooService struct { | |||
} | |||
func (svc *ZooService) ManageAnimal(ctx context.Context, dog *Dog, cat *Cat) (bool, error) { | |||
return true, nil | |||
} | |||
type TestService struct { | |||
*ZooService | |||
ManageAnimal func(ctx context.Context, dog *Dog, cat *Cat) (bool, error) | |||
} | |||
var methodTransactionInfo = make(map[string]*TransactionInfo) | |||
func init() { | |||
methodTransactionInfo["ManageAnimal"] = &TransactionInfo{ | |||
TimeOut: 60000, | |||
Name: "", | |||
Propagation: REQUIRED, | |||
} | |||
} | |||
func (svc *TestService) GetProxyService() interface{} { | |||
return svc.ZooService | |||
} | |||
func (svc *TestService) GetMethodTransactionInfo(methodName string) *TransactionInfo { | |||
return methodTransactionInfo[methodName] | |||
} | |||
func TestProxy_Implement(t *testing.T) { | |||
config.InitConfWithDefault("testService") | |||
client.NewRpcClient() | |||
zooSvc := &ZooService{} | |||
ts := &TestService{ZooService: zooSvc} | |||
Implement(ts) | |||
result, err := ts.ManageAnimal(context.Background(), &Dog{}, &Cat{}) | |||
assert.True(t, result) | |||
assert.Nil(t, err) | |||
} |
@@ -1,60 +0,0 @@ | |||
package config | |||
import ( | |||
"time" | |||
) | |||
import ( | |||
getty "github.com/apache/dubbo-getty" | |||
"github.com/pkg/errors" | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/getty/config" | |||
) | |||
// Config holds supported types by the multiconfig package | |||
type GettyConfig struct { | |||
// getty_session | |||
Session_Timeout string `default:"60s" yaml:"session_timeout" json:"session_timeout,omitempty"` | |||
SessionTimeout time.Duration | |||
// getty_session tcp parameters | |||
GettySessionParam config.GettySessionParam `required:"true" yaml:"getty_session_param" json:"getty_session_param,omitempty"` | |||
} | |||
// CheckValidity ... | |||
func (c *GettyConfig) CheckValidity() error { | |||
var err error | |||
if c.SessionTimeout, err = time.ParseDuration(c.Session_Timeout); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(SessionTimeout{%#v})", c.SessionTimeout) | |||
} | |||
if c.SessionTimeout >= time.Duration(getty.MaxWheelTimeSpan) { | |||
return errors.WithMessagef(err, "session_timeout %s should be less than %s", | |||
c.SessionTimeout, time.Duration(getty.MaxWheelTimeSpan)) | |||
} | |||
return errors.WithStack(c.GettySessionParam.CheckValidity()) | |||
} | |||
// GetDefaultGettyConfig ... | |||
func GetDefaultGettyConfig() GettyConfig { | |||
return GettyConfig{ | |||
Session_Timeout: "180s", | |||
GettySessionParam: config.GettySessionParam{ | |||
CompressEncoding: false, | |||
TcpNoDelay: true, | |||
TcpKeepAlive: true, | |||
KeepAlivePrd: "180s", | |||
TcpRBufSize: 262144, | |||
TcpWBufSize: 65536, | |||
TcpReadTmt: "1s", | |||
TcpWriteTmt: "5s", | |||
WaitTmt: "1s", | |||
MaxMsgLen: 102400, | |||
SessionName: "server", | |||
}, | |||
} | |||
} |
@@ -2,161 +2,159 @@ package config | |||
import ( | |||
"fmt" | |||
"github.com/go-xorm/xorm" | |||
"io" | |||
"io/ioutil" | |||
"os" | |||
"path" | |||
"time" | |||
) | |||
import ( | |||
"github.com/go-xorm/xorm" | |||
getty "github.com/apache/dubbo-getty" | |||
"github.com/imdario/mergo" | |||
"github.com/pkg/errors" | |||
"gopkg.in/yaml.v2" | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
baseConfig "github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config" | |||
"github.com/transaction-wg/seata-golang/pkg/base/config_center" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
"github.com/transaction-wg/seata-golang/pkg/util/parser" | |||
) | |||
var ( | |||
conf ServerConfig | |||
) | |||
func GetServerConfig() ServerConfig { | |||
return conf | |||
} | |||
func GetStoreConfig() StoreConfig { | |||
return conf.StoreConfig | |||
} | |||
func GetRegistryConfig() RegistryConfig { | |||
return conf.RegistryConfig | |||
} | |||
func GetConfigCenterConfig() baseConfig.ConfigCenterConfig { | |||
return conf.ConfigCenterConfig | |||
} | |||
var serverConfig *ServerConfig | |||
type ServerConfig struct { | |||
Host string `yaml:"host" default:"127.0.0.1" json:"host,omitempty"` | |||
Port string `default:"8091" yaml:"port" json:"port,omitempty"` | |||
MaxRollbackRetryTimeout int64 `default:"-1" yaml:"max_rollback_retry_timeout" json:"max_rollback_retry_timeout,omitempty"` | |||
RollbackRetryTimeoutUnlockEnable bool `default:"false" yaml:"rollback_retry_timeout_unlock_enable" json:"rollback_retry_timeout_unlock_enable,omitempty"` | |||
MaxCommitRetryTimeout int64 `default:"-1" yaml:"max_commit_retry_timeout" json:"max_commit_retry_timeout,omitempty"` | |||
Timeout_Retry_Period string `default:"1s" yaml:"timeout_retry_period" json:"timeout_retry_period,omitempty"` | |||
TimeoutRetryPeriod time.Duration | |||
Rollbacking_Retry_Period string `default:"1s" yaml:"rollbacking_retry_period" json:"rollbacking_retry_period,omitempty"` | |||
RollbackingRetryPeriod time.Duration | |||
Committing_Retry_Period string `default:"1s" yaml:"committing_retry_period" json:"committing_retry_period,omitempty"` | |||
CommittingRetryPeriod time.Duration | |||
Async_Committing_Retry_Period string `default:"1s" yaml:"async_committing_retry_period" json:"async_committing_retry_period,omitempty"` | |||
AsyncCommittingRetryPeriod time.Duration | |||
Log_Delete_Period string `default:"24h" yaml:"log_delete_period" json:"log_delete_period,omitempty"` | |||
LogDeletePeriod time.Duration | |||
GettyConfig GettyConfig `required:"true" yaml:"getty_config" json:"getty_config,omitempty"` | |||
UndoConfig UndoConfig `required:"true" yaml:"undo_config" json:"undo_config,omitempty"` | |||
StoreConfig StoreConfig `required:"true" yaml:"store_config" json:"store_config,omitempty"` | |||
RegistryConfig RegistryConfig `yaml:"registry_config" json:"registry_config,omitempty"` //registry center config | |||
ConfigCenterConfig baseConfig.ConfigCenterConfig `yaml:"config_center" json:"config_center,omitempty"` //config center config | |||
} | |||
func (c *ServerConfig) CheckValidity() error { | |||
var err error | |||
if conf.TimeoutRetryPeriod, err = time.ParseDuration(conf.Timeout_Retry_Period); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(Timeout_Retry_Period{%#v})", conf.Timeout_Retry_Period) | |||
} | |||
if conf.RollbackingRetryPeriod, err = time.ParseDuration(conf.Rollbacking_Retry_Period); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(Rollbacking_Retry_Period{%#v})", conf.Rollbacking_Retry_Period) | |||
} | |||
TimeoutRetryPeriod time.Duration `default:"1s" yaml:"timeout_retry_period" json:"timeout_retry_period,omitempty"` | |||
RollingBackRetryPeriod time.Duration `default:"1s" yaml:"rolling_back_retry_period" json:"rolling_back_retry_period,omitempty"` | |||
CommittingRetryPeriod time.Duration `default:"1s" yaml:"committing_retry_period" json:"committing_retry_period,omitempty"` | |||
AsyncCommittingRetryPeriod time.Duration `default:"1s" yaml:"async_committing_retry_period" json:"async_committing_retry_period,omitempty"` | |||
LogDeletePeriod time.Duration `default:"24h" yaml:"log_delete_period" json:"log_delete_period,omitempty"` | |||
if conf.CommittingRetryPeriod, err = time.ParseDuration(conf.Committing_Retry_Period); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(Committing_Retry_Period{%#v})", conf.Committing_Retry_Period) | |||
} | |||
GettyConfig struct { | |||
SessionTimeout time.Duration `default:"60s" yaml:"session_timeout" json:"session_timeout,omitempty"` | |||
if conf.AsyncCommittingRetryPeriod, err = time.ParseDuration(conf.Async_Committing_Retry_Period); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(Async_Committing_Retry_Period{%#v})", conf.Async_Committing_Retry_Period) | |||
} | |||
GettySessionParam config.GettySessionParam `required:"true" yaml:"getty_session_param" json:"getty_session_param,omitempty"` | |||
} `required:"true" yaml:"getty_config" json:"getty_config,omitempty"` | |||
if conf.LogDeletePeriod, err = time.ParseDuration(conf.Log_Delete_Period); err != nil { | |||
return errors.WithMessagef(err, "time.ParseDuration(Log_Delete_Period{%#v})", conf.Log_Delete_Period) | |||
} | |||
UndoConfig struct { | |||
LogSaveDays int16 `default:"7" yaml:"log_save_days" json:"log_save_days,omitempty"` | |||
} `required:"true" yaml:"undo_config" json:"undo_config,omitempty"` | |||
return errors.WithStack(c.GettyConfig.CheckValidity()) | |||
StoreConfig StoreConfig `required:"true" yaml:"store_config" json:"store_config,omitempty"` | |||
RegistryConfig config.RegistryConfig `yaml:"registry_config" json:"registry_config,omitempty"` //registry center config | |||
ConfigCenterConfig config.ConfigCenterConfig `yaml:"config_center" json:"config_center,omitempty"` //config center config | |||
} | |||
// LoadFromEnv provides env variable fallback for config. | |||
// Credential configs, such as db password, can be provided by env variables. | |||
func (c *ServerConfig) LoadFromEnv() { | |||
// store config | |||
if c.StoreConfig.DBStoreConfig.DSN == "" { | |||
c.StoreConfig.DBStoreConfig.DSN = os.Getenv("SEATA_STORE_DB_DSN") | |||
} | |||
func GetServerConfig() *ServerConfig { | |||
return serverConfig | |||
} | |||
func InitConf(confFile string) error { | |||
var err error | |||
if confFile == "" { | |||
return errors.WithMessagef(err, fmt.Sprintf("application configure file name is nil")) | |||
} | |||
if path.Ext(confFile) != ".yml" { | |||
return errors.WithMessagef(err, fmt.Sprintf("application configure file name{%v} suffix must be .yml", confFile)) | |||
} | |||
func GetStoreConfig() StoreConfig { | |||
return serverConfig.StoreConfig | |||
} | |||
conf = ServerConfig{} | |||
confFileStream, err := ioutil.ReadFile(confFile) | |||
// parse parses an input configuration yaml document into a ServerConfig struct | |||
// | |||
// Environment variables may be used to override configuration parameters other than version, | |||
// following the scheme below: | |||
// ServerConfig.Abc may be replaced by the value of SEATA_ABC, | |||
// ServerConfig.Abc.Xyz may be replaced by the value of SEATA_ABC_XYZ, and so forth | |||
func parse(rd io.Reader) (*ServerConfig, error) { | |||
in, err := ioutil.ReadAll(rd) | |||
if err != nil { | |||
return errors.WithMessagef(err, fmt.Sprintf("ioutil.ReadFile(file:%s) = error:%s", confFile, err)) | |||
return nil, err | |||
} | |||
err = yaml.Unmarshal(confFileStream, &conf) | |||
p := parser.NewParser("seata") | |||
config := new(ServerConfig) | |||
err = p.Parse(in, config) | |||
if err != nil { | |||
return errors.WithMessagef(err, fmt.Sprintf("yaml.Unmarshal() = error:%s", err)) | |||
} | |||
(&conf).LoadFromEnv() | |||
//load the remote config | |||
loadConfigCenterConfig(&conf.ConfigCenterConfig) | |||
(&conf).CheckValidity() | |||
if conf.StoreConfig.StoreMode == "db" && conf.StoreConfig.DBStoreConfig.DSN != "" { | |||
engine, err := xorm.NewEngine("mysql", conf.StoreConfig.DBStoreConfig.DSN) | |||
if err != nil { | |||
panic(err) | |||
} | |||
conf.StoreConfig.DBStoreConfig.Engine = engine | |||
return nil, err | |||
} | |||
return nil | |||
return config, nil | |||
} | |||
func loadConfigCenterConfig(centerConf *baseConfig.ConfigCenterConfig) { | |||
if centerConf.Mode == "" { | |||
func loadConfigCenterConfig(config *ServerConfig) { | |||
if config.ConfigCenterConfig.Mode == "" { | |||
return | |||
} | |||
cc, err := extension.GetConfigCenter(centerConf.Mode, centerConf) | |||
cc, err := extension.GetConfigCenter(config.ConfigCenterConfig.Mode, &config.ConfigCenterConfig) | |||
if err != nil { | |||
log.Error("ConfigCenter can not connect success.Error message is %s", err.Error()) | |||
} | |||
confStr := config_center.LoadConfigCenterConfig(cc, centerConf, &ServerConfigListener{}) | |||
updateConf(&conf, confStr) | |||
remoteConfig := config_center.LoadConfigCenterConfig(cc, &config.ConfigCenterConfig, &ServerConfigListener{}) | |||
updateConf(config, remoteConfig) | |||
} | |||
type ServerConfigListener struct { | |||
} | |||
func (ServerConfigListener) Process(event *config_center.ConfigChangeEvent) { | |||
//update conf | |||
conf := GetServerConfig() | |||
updateConf(&conf, event.Value.(string)) | |||
serverConfig := GetServerConfig() | |||
updateConf(serverConfig, event.Value.(string)) | |||
} | |||
func updateConf(config *ServerConfig, confStr string) { | |||
func updateConf(config *ServerConfig, remoteConfig string) { | |||
newConf := &ServerConfig{} | |||
confByte := []byte(confStr) | |||
confByte := []byte(remoteConfig) | |||
yaml.Unmarshal(confByte, newConf) | |||
//merge the config-center config with the local file config to form the new config | |||
if err := mergo.Merge(config, newConf, mergo.WithOverride); err != nil { | |||
log.Error("merge config fail %s ", err.Error()) | |||
} | |||
} | |||
func InitConf(configPath string) (*ServerConfig, error) { | |||
var configFilePath string | |||
if configPath != "" { | |||
configFilePath = configPath | |||
} else if os.Getenv("SEATA_CONFIGURATION_PATH") != "" { | |||
configFilePath = os.Getenv("SEATA_CONFIGURATION_PATH") | |||
} | |||
if configFilePath == "" { | |||
return nil, fmt.Errorf("configuration path unspecified") | |||
} | |||
fp, err := os.Open(configFilePath) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer fp.Close() | |||
conf, err := parse(fp) | |||
if err != nil { | |||
return nil, fmt.Errorf("error parsing %s: %v", configFilePath, err) | |||
} | |||
if conf.GettyConfig.SessionTimeout >= time.Duration(getty.MaxWheelTimeSpan) { | |||
return nil, errors.Errorf("session_timeout %s should be less than %s", | |||
conf.GettyConfig.SessionTimeout, time.Duration(getty.MaxWheelTimeSpan)) | |||
} | |||
loadConfigCenterConfig(conf) | |||
config.InitRegistryConfig(&conf.RegistryConfig) | |||
serverConfig = conf | |||
if conf.StoreConfig.StoreMode == "db" && conf.StoreConfig.DBStoreConfig.DSN != "" { | |||
engine, err := xorm.NewEngine("mysql", conf.StoreConfig.DBStoreConfig.DSN) | |||
if err != nil { | |||
panic(err) | |||
} | |||
conf.StoreConfig.DBStoreConfig.Engine = engine | |||
} | |||
return serverConfig, nil | |||
} |
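The server-side InitConf now accepts an explicit path, falls back to the SEATA_CONFIGURATION_PATH environment variable, and caches the parsed result behind GetServerConfig. A minimal usage sketch; the conf/seata_tc.yml path is hypothetical:

```go
package main

import (
	"log"
	"os"

	"github.com/transaction-wg/seata-golang/pkg/tc/config"
)

func main() {
	// Equivalent to starting the TC without a --config flag.
	os.Setenv("SEATA_CONFIGURATION_PATH", "conf/seata_tc.yml")

	conf, err := config.InitConf("")
	if err != nil {
		log.Fatal(err)
	}
	// The same value is cached and reachable via config.GetServerConfig().
	log.Printf("listening on port %s", conf.Port)
}
```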
@@ -1,5 +0,0 @@ | |||
package config | |||
type UndoConfig struct { | |||
LogSaveDays int16 `default:"7" yaml:"log_save_days" json:"log_save_days,omitempty"` | |||
} |
@@ -9,7 +9,6 @@ import ( | |||
"github.com/transaction-wg/seata-golang/pkg/tc/config" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/model" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/session" | |||
"github.com/transaction-wg/seata-golang/pkg/util/uuid" | |||
) | |||
type DataBaseSessionManager struct { | |||
@@ -123,7 +122,7 @@ func (sessionManager *DataBaseSessionManager) AllSessions() []*session.GlobalSes | |||
ss := sessionManager.FindGlobalSessions(model.SessionCondition{ | |||
Statuses: []meta.GlobalStatus{meta.GlobalStatusRollbackRetrying, | |||
meta.GlobalStatusRollbacking, | |||
meta.GlobalStatusTimeoutRollbacking, | |||
meta.GlobalStatusTimeoutRollingBack, | |||
meta.GlobalStatusTimeoutRollbackRetrying, | |||
}, | |||
}) | |||
@@ -132,7 +131,7 @@ func (sessionManager *DataBaseSessionManager) AllSessions() []*session.GlobalSes | |||
return sessionManager.FindGlobalSessions(model.SessionCondition{ | |||
Statuses: []meta.GlobalStatus{meta.GlobalStatusUnknown, meta.GlobalStatusBegin, | |||
meta.GlobalStatusCommitting, meta.GlobalStatusCommitRetrying, meta.GlobalStatusRollbacking, | |||
meta.GlobalStatusRollbackRetrying, meta.GlobalStatusTimeoutRollbacking, meta.GlobalStatusTimeoutRollbackRetrying, | |||
meta.GlobalStatusRollbackRetrying, meta.GlobalStatusTimeoutRollingBack, meta.GlobalStatusTimeoutRollbackRetrying, | |||
meta.GlobalStatusAsyncCommitting, | |||
}, | |||
}) | |||
@@ -144,8 +143,5 @@ func (sessionManager *DataBaseSessionManager) FindGlobalSessions(condition model | |||
} | |||
func (sessionManager *DataBaseSessionManager) Reload() { | |||
maxSessionID := sessionManager.TransactionStoreManager.GetCurrentMaxSessionID() | |||
if maxSessionID > uuid.UUID { | |||
uuid.SetUUID(uuid.UUID, maxSessionID) | |||
} | |||
} |
@@ -5,7 +5,6 @@ import ( | |||
"github.com/transaction-wg/seata-golang/pkg/tc/model" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/session" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
"github.com/transaction-wg/seata-golang/pkg/util/uuid" | |||
) | |||
type DBTransactionStoreManager struct { | |||
@@ -144,10 +143,6 @@ func (storeManager *DBTransactionStoreManager) Shutdown() { | |||
} | |||
func (storeManager *DBTransactionStoreManager) GetCurrentMaxSessionID() int64 { | |||
return storeManager.LogStore.GetCurrentMaxSessionID(uuid.GetMaxUUID(), uuid.GetInitUUID()) | |||
} | |||
func getGlobalSession(globalTransactionDO *model.GlobalTransactionDO, branchTransactionDOs []*model.BranchTransactionDO) *session.GlobalSession { | |||
globalSession := convertGlobalTransaction(globalTransactionDO) | |||
if branchTransactionDOs != nil && len(branchTransactionDOs) > 0 { | |||
@@ -35,7 +35,7 @@ func TestDefaultSessionManager_FindGlobalSession(t *testing.T) { | |||
} | |||
func globalSessionsProvider() []*session.GlobalSession { | |||
common.GetXID().Init("127.0.0.1", 9876) | |||
common.Init("127.0.0.1", 9876) | |||
result := make([]*session.GlobalSession, 0) | |||
gs1 := session.NewGlobalSession( | |||
@@ -60,7 +60,7 @@ func globalSessionsProvider() []*session.GlobalSession { | |||
func globalSessionProvider(t *testing.T) *session.GlobalSession { | |||
testutil.InitServerConfig(t) | |||
common.GetXID().Init("127.0.0.1", 9876) | |||
common.Init("127.0.0.1", 9876) | |||
gs := session.NewGlobalSession( | |||
session.WithGsApplicationID("demo-cmd"), | |||
@@ -5,7 +5,6 @@ import ( | |||
"github.com/transaction-wg/seata-golang/pkg/tc/config" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/session" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
"github.com/transaction-wg/seata-golang/pkg/util/uuid" | |||
) | |||
type FileBasedSessionManager struct { | |||
@@ -89,11 +88,9 @@ func (sessionManager *FileBasedSessionManager) washSessions() { | |||
} | |||
func (sessionManager *FileBasedSessionManager) restore(stores []*TransactionWriteStore, unhandledBranchSessions map[int64]*session.BranchSession) { | |||
maxRecoverID := uuid.UUID | |||
for _, store := range stores { | |||
logOperation := store.LogOperation | |||
sessionStorable := store.SessionRequest | |||
maxRecoverID = getMaxID(maxRecoverID, sessionStorable) | |||
switch logOperation { | |||
case LogOperationGlobalAdd, LogOperationGlobalUpdate: | |||
{ | |||
@@ -164,38 +161,4 @@ func (sessionManager *FileBasedSessionManager) restore(stores []*TransactionWrit | |||
break | |||
} | |||
} | |||
setMaxID(maxRecoverID) | |||
} | |||
func getMaxID(maxRecoverID int64, sessionStorable session.SessionStorable) int64 { | |||
var currentID int64 = 0 | |||
var gs, ok1 = sessionStorable.(*session.GlobalSession) | |||
if ok1 { | |||
currentID = gs.TransactionID | |||
} | |||
var bs, ok2 = sessionStorable.(*session.BranchSession) | |||
if ok2 { | |||
currentID = bs.BranchID | |||
} | |||
if maxRecoverID > currentID { | |||
return maxRecoverID | |||
} else { | |||
return currentID | |||
} | |||
} | |||
func setMaxID(maxRecoverID int64) { | |||
var currentID int64 | |||
// will be recover multi-thread later | |||
for { | |||
currentID = uuid.UUID | |||
if currentID < maxRecoverID { | |||
if uuid.SetUUID(currentID, maxRecoverID) { | |||
break | |||
} | |||
} | |||
break | |||
} | |||
} |
@@ -113,9 +113,6 @@ func (storeManager *FileTransactionStoreManager) Shutdown() { | |||
storeManager.currFileChannel.Close() | |||
} | |||
func (storeManager *FileTransactionStoreManager) GetCurrentMaxSessionID() int64 { | |||
return int64(0) | |||
} | |||
func (storeManager *FileTransactionStoreManager) saveHistory() { | |||
storeManager.findTimeoutAndSave() | |||
@@ -25,7 +25,7 @@ const ( | |||
status, client_id, application_data, gmt_create, gmt_modified) values(?, ?, ?, ?, ?, ?, ?, ?, ?, now(6), now(6))` | |||
UpdateBranchTransactionDO = "update branch_table set status = ?, gmt_modified = now(6) where xid = ? and branch_id = ?" | |||
DeleteBranchTransactionDO = "delete from branch_table where xid = ? and branch_id = ?" | |||
QueryMaxTransactionID = "select max(transaction_id) as maxTransactioID from global_table where transaction_id < ? and transaction_id > ?" | |||
QueryMaxTransactionID = "select max(transaction_id) as maxTransactionID from global_table where transaction_id < ? and transaction_id > ?" | |||
QueryMaxBranchID = "select max(branch_id) as maxBranchID from branch_table where branch_id < ? and branch_id > ?" | |||
) | |||
@@ -82,7 +82,7 @@ func (sessionHolder SessionHolder) reload() { | |||
case meta.GlobalStatusCommitting, meta.GlobalStatusCommitRetrying: | |||
sessionHolder.RetryCommittingSessionManager.AddGlobalSession(globalSession) | |||
break | |||
case meta.GlobalStatusRollbacking, meta.GlobalStatusRollbackRetrying, meta.GlobalStatusTimeoutRollbacking, | |||
case meta.GlobalStatusRollbacking, meta.GlobalStatusRollbackRetrying, meta.GlobalStatusTimeoutRollingBack, | |||
meta.GlobalStatusTimeoutRollbackRetrying: | |||
sessionHolder.RetryRollbackingSessionManager.AddGlobalSession(globalSession) | |||
break | |||
@@ -74,9 +74,6 @@ type TransactionStoreManager interface { | |||
// Shutdown. | |||
Shutdown() | |||
// Gets current max session id. | |||
GetCurrentMaxSessionID() int64 | |||
} | |||
type AbstractTransactionStoreManager struct { | |||
@@ -24,8 +24,9 @@ func TestLockManager_AcquireLock(t *testing.T) { | |||
} | |||
func TestLockManager_IsLockable(t *testing.T) { | |||
transID := uuid.GeneratorUUID() | |||
ok := GetLockManager().IsLockable(common.GetXID().GenerateXID(transID), "tb_1", "tb_1:13") | |||
transID := uuid.NextID() | |||
common.Init("127.0.0.1", 9876) | |||
ok := GetLockManager().IsLockable(common.GenerateXID(transID), "tb_1", "tb_1:13") | |||
assert.Equal(t, ok, true) | |||
} | |||
@@ -67,7 +68,7 @@ func TestLockManager_IsLockable2(t *testing.T) { | |||
result1 := GetLockManager().IsLockable(bs.XID, bs.ResourceID, bs.LockKey) | |||
assert.True(t, result1) | |||
GetLockManager().AcquireLock(bs) | |||
bs.TransactionID = uuid.GeneratorUUID() | |||
bs.TransactionID = uuid.NextID() | |||
result2 := GetLockManager().IsLockable(bs.XID, bs.ResourceID, bs.LockKey) | |||
assert.False(t, result2) | |||
} | |||
@@ -103,10 +104,11 @@ func branchSessionsProvider() []*session.BranchSession { | |||
func baseBranchSessionsProvider(resourceID string, lockKey1 string, lockKey2 string) []*session.BranchSession { | |||
var branchSessions = make([]*session.BranchSession, 0) | |||
transID := uuid.GeneratorUUID() | |||
transID2 := uuid.GeneratorUUID() | |||
transID := uuid.NextID() | |||
transID2 := uuid.NextID() | |||
common.Init("127.0.0.1", 9876) | |||
bs := session.NewBranchSession( | |||
session.WithBsXid(common.GetXID().GenerateXID(transID)), | |||
session.WithBsXid(common.GenerateXID(transID)), | |||
session.WithBsTransactionID(transID), | |||
session.WithBsBranchID(1), | |||
session.WithBsResourceGroupID("my_test_tx_group"), | |||
@@ -119,7 +121,7 @@ func baseBranchSessionsProvider(resourceID string, lockKey1 string, lockKey2 str | |||
) | |||
bs1 := session.NewBranchSession( | |||
session.WithBsXid(common.GetXID().GenerateXID(transID2)), | |||
session.WithBsXid(common.GenerateXID(transID2)), | |||
session.WithBsTransactionID(transID2), | |||
session.WithBsBranchID(1), | |||
session.WithBsResourceGroupID("my_test_tx_group"), | |||
@@ -137,11 +139,11 @@ func baseBranchSessionsProvider(resourceID string, lockKey1 string, lockKey2 str | |||
} | |||
func branchSessionProvider() *session.BranchSession { | |||
common.GetXID().Init("127.0.0.1", 9876) | |||
common.Init("127.0.0.1", 9876) | |||
transID := uuid.GeneratorUUID() | |||
transID := uuid.NextID() | |||
bs := session.NewBranchSession( | |||
session.WithBsXid(common.GetXID().GenerateXID(transID)), | |||
session.WithBsXid(common.GenerateXID(transID)), | |||
session.WithBsTransactionID(transID), | |||
session.WithBsBranchID(1), | |||
session.WithBsResourceGroupID("my_test_tx_group"), | |||
@@ -17,5 +17,5 @@ func TestLockStoreDataBaseDao_UnLockByXidAndBranchIDs(t *testing.T) { | |||
} | |||
lockStore := &LockStoreDataBaseDao{engine: engine} | |||
lockStore.UnLockByXidAndBranchIDs(":0:2000042936", []int64{2000042938}) | |||
lockStore.UnLockByXIDAndBranchIDs(":0:2000042936", []int64{2000042938}) | |||
} |
@@ -27,38 +27,28 @@ import ( | |||
) | |||
const ( | |||
RPC_REQUEST_TIMEOUT = 30 * time.Second | |||
ALWAYS_RETRY_BOUNDARY = 0 | |||
RpcRequestTimeout = 30 * time.Second | |||
AlwaysRetryBoundary = 0 | |||
) | |||
type DefaultCoordinator struct { | |||
conf config.ServerConfig | |||
conf *config.ServerConfig | |||
core TransactionCoordinator | |||
idGenerator *atomic.Uint32 | |||
futures *sync.Map | |||
timeoutCheckTicker *time.Ticker | |||
retryRollbackingTicker *time.Ticker | |||
retryCommittingTicker *time.Ticker | |||
asyncCommittingTicker *time.Ticker | |||
undoLogDeleteTicker *time.Ticker | |||
} | |||
func NewDefaultCoordinator(conf config.ServerConfig) *DefaultCoordinator { | |||
func NewDefaultCoordinator(conf *config.ServerConfig) *DefaultCoordinator { | |||
coordinator := &DefaultCoordinator{ | |||
conf: conf, | |||
idGenerator: &atomic.Uint32{}, | |||
futures: &sync.Map{}, | |||
timeoutCheckTicker: time.NewTicker(conf.TimeoutRetryPeriod), | |||
retryRollbackingTicker: time.NewTicker(conf.RollbackingRetryPeriod), | |||
retryCommittingTicker: time.NewTicker(conf.CommittingRetryPeriod), | |||
asyncCommittingTicker: time.NewTicker(conf.AsyncCommittingRetryPeriod), | |||
undoLogDeleteTicker: time.NewTicker(conf.LogDeletePeriod), | |||
} | |||
core := NewCore(coordinator) | |||
coordinator.core = core | |||
go coordinator.processTimeoutCheck() | |||
go coordinator.processRetryRollbacking() | |||
go coordinator.processRetryRollingBack() | |||
go coordinator.processRetryCommitting() | |||
go coordinator.processAsyncCommitting() | |||
go coordinator.processUndoLogDelete() | |||
@@ -139,36 +129,56 @@ func (coordinator *DefaultCoordinator) defaultSendResponse(request protocal.RpcM | |||
func (coordinator *DefaultCoordinator) processTimeoutCheck() { | |||
for { | |||
<-coordinator.timeoutCheckTicker.C | |||
coordinator.timeoutCheck() | |||
timer := time.NewTimer(coordinator.conf.TimeoutRetryPeriod) | |||
select { | |||
case <-timer.C: | |||
coordinator.timeoutCheck() | |||
} | |||
timer.Stop() | |||
} | |||
} | |||
func (coordinator *DefaultCoordinator) processRetryRollbacking() { | |||
func (coordinator *DefaultCoordinator) processRetryRollingBack() { | |||
for { | |||
<-coordinator.retryRollbackingTicker.C | |||
coordinator.handleRetryRollbacking() | |||
timer := time.NewTimer(coordinator.conf.RollingBackRetryPeriod) | |||
select { | |||
case <-timer.C: | |||
coordinator.handleRetryRollbacking() | |||
} | |||
timer.Stop() | |||
} | |||
} | |||
func (coordinator *DefaultCoordinator) processRetryCommitting() { | |||
for { | |||
<-coordinator.retryCommittingTicker.C | |||
coordinator.handleRetryCommitting() | |||
timer := time.NewTimer(coordinator.conf.CommittingRetryPeriod) | |||
select { | |||
case <-timer.C: | |||
coordinator.handleRetryCommitting() | |||
} | |||
timer.Stop() | |||
} | |||
} | |||
func (coordinator *DefaultCoordinator) processAsyncCommitting() { | |||
for { | |||
<-coordinator.asyncCommittingTicker.C | |||
coordinator.handleAsyncCommitting() | |||
timer := time.NewTimer(coordinator.conf.AsyncCommittingRetryPeriod) | |||
select { | |||
case <-timer.C: | |||
coordinator.handleAsyncCommitting() | |||
} | |||
timer.Stop() | |||
} | |||
} | |||
func (coordinator *DefaultCoordinator) processUndoLogDelete() { | |||
for { | |||
<-coordinator.undoLogDeleteTicker.C | |||
coordinator.undoLogDelete() | |||
timer := time.NewTimer(coordinator.conf.LogDeletePeriod) | |||
select { | |||
case <-timer.C: | |||
coordinator.undoLogDelete() | |||
} | |||
timer.Stop() | |||
} | |||
} | |||
@@ -190,7 +200,7 @@ func (coordinator *DefaultCoordinator) timeoutCheck() { | |||
if globalSession.Active { | |||
globalSession.Active = false | |||
} | |||
changeGlobalSessionStatus(globalSession, meta.GlobalStatusTimeoutRollbacking) | |||
changeGlobalSessionStatus(globalSession, meta.GlobalStatusTimeoutRollingBack) | |||
evt := event.NewGlobalTransactionEvent(globalSession.TransactionID, event.RoleTC, globalSession.TransactionName, globalSession.BeginTime, 0, globalSession.Status) | |||
event.EventBus.GlobalTransactionEventChannel <- evt | |||
return true | |||
@@ -205,32 +215,32 @@ func (coordinator *DefaultCoordinator) timeoutCheck() { | |||
} | |||
func (coordinator *DefaultCoordinator) handleRetryRollbacking() { | |||
rollbackingSessions := holder.GetSessionHolder().RetryRollbackingSessionManager.AllSessions() | |||
if rollbackingSessions == nil && len(rollbackingSessions) <= 0 { | |||
rollingBackSessions := holder.GetSessionHolder().RetryRollbackingSessionManager.AllSessions() | |||
if len(rollingBackSessions) == 0 { | |||
return | |||
} | |||
now := time2.CurrentTimeMillis() | |||
for _, rollbackingSession := range rollbackingSessions { | |||
if rollbackingSession.Status == meta.GlobalStatusRollbacking && !rollbackingSession.IsRollbackingDead() { | |||
for _, rollingBackSession := range rollingBackSessions { | |||
if rollingBackSession.Status == meta.GlobalStatusRollbacking && !rollingBackSession.IsRollbackingDead() { | |||
continue | |||
} | |||
if isRetryTimeout(int64(now), coordinator.conf.MaxRollbackRetryTimeout, rollbackingSession.BeginTime) { | |||
if isRetryTimeout(int64(now), coordinator.conf.MaxRollbackRetryTimeout, rollingBackSession.BeginTime) { | |||
if coordinator.conf.RollbackRetryTimeoutUnlockEnable { | |||
lock.GetLockManager().ReleaseGlobalSessionLock(rollbackingSession) | |||
lock.GetLockManager().ReleaseGlobalSessionLock(rollingBackSession) | |||
} | |||
holder.GetSessionHolder().RetryRollbackingSessionManager.RemoveGlobalSession(rollbackingSession) | |||
log.Errorf("GlobalSession rollback retry timeout and removed [%s]", rollbackingSession.XID) | |||
holder.GetSessionHolder().RetryRollbackingSessionManager.RemoveGlobalSession(rollingBackSession) | |||
log.Errorf("GlobalSession rollback retry timeout and removed [%s]", rollingBackSession.XID) | |||
continue | |||
} | |||
_, err := coordinator.core.doGlobalRollback(rollbackingSession, true) | |||
_, err := coordinator.core.doGlobalRollback(rollingBackSession, true) | |||
if err != nil { | |||
log.Infof("Failed to retry rollbacking [%s]", rollbackingSession.XID) | |||
log.Infof("Failed to retry rolling back [%s]", rollingBackSession.XID) | |||
} | |||
} | |||
} | |||
func isRetryTimeout(now int64, timeout int64, beginTime int64) bool { | |||
if timeout >= ALWAYS_RETRY_BOUNDARY && now-beginTime > timeout { | |||
if timeout >= AlwaysRetryBoundary && now-beginTime > timeout { | |||
return true | |||
} | |||
return false | |||
@@ -287,9 +297,4 @@ func (coordinator *DefaultCoordinator) undoLogDelete() { | |||
} | |||
func (coordinator *DefaultCoordinator) Stop() { | |||
coordinator.timeoutCheckTicker.Stop() | |||
coordinator.retryRollbackingTicker.Stop() | |||
coordinator.retryCommittingTicker.Stop() | |||
coordinator.asyncCommittingTicker.Stop() | |||
coordinator.undoLogDeleteTicker.Stop() | |||
} |
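For readers following the refactor above: the coordinator's long-lived tickers were replaced by a timer created on each pass of every background loop, so each wait re-reads its period from the configuration. A minimal, generic sketch of that pattern; the function name, parameters, and the stop channel are illustrative and not part of this PR:

```go
package main

import "time"

// runPeriodically waits for period() between passes and runs work() once per pass.
// Because the period is re-evaluated every iteration, a changed configuration
// value takes effect on the next pass without recreating the loop.
func runPeriodically(period func() time.Duration, work func(), stop <-chan struct{}) {
	for {
		timer := time.NewTimer(period())
		select {
		case <-timer.C:
			work()
		case <-stop:
			timer.Stop()
			return
		}
		timer.Stop()
	}
}
```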
@@ -25,7 +25,7 @@ func (coordinator *DefaultCoordinator) SendResponse(request protocal.RpcMessage, | |||
} | |||
func (coordinator *DefaultCoordinator) SendSyncRequest(resourceID string, clientID string, message interface{}) (interface{}, error) { | |||
return coordinator.SendSyncRequestWithTimeout(resourceID, clientID, message, RPC_REQUEST_TIMEOUT) | |||
return coordinator.SendSyncRequestWithTimeout(resourceID, clientID, message, RpcRequestTimeout) | |||
} | |||
func (coordinator *DefaultCoordinator) SendSyncRequestWithTimeout(resourceID string, clientID string, message interface{}, timeout time.Duration) (interface{}, error) { | |||
@@ -37,7 +37,7 @@ func (coordinator *DefaultCoordinator) SendSyncRequestWithTimeout(resourceID str | |||
} | |||
func (coordinator *DefaultCoordinator) SendSyncRequestByGetty(session getty.Session, message interface{}) (interface{}, error) { | |||
return coordinator.SendSyncRequestByGettyWithTimeout(session, message, RPC_REQUEST_TIMEOUT) | |||
return coordinator.SendSyncRequestByGettyWithTimeout(session, message, RpcRequestTimeout) | |||
} | |||
func (coordinator *DefaultCoordinator) SendSyncRequestByGettyWithTimeout(session getty.Session, message interface{}, timeout time.Duration) (interface{}, error) { | |||
@@ -469,7 +469,7 @@ func (core *DefaultCore) doGlobalRollback(globalSession *session.GlobalSession, | |||
} | |||
branchStatus, err1 := core.branchRollback(globalSession, bs) | |||
if err1 != nil { | |||
log.Errorf("Exception rollbacking branch xid=%d branchID=%d", globalSession.XID, bs.BranchID) | |||
log.Errorf("Exception rolling back branch xid=%d branchID=%d", globalSession.XID, bs.BranchID) | |||
if !retrying { | |||
queueToRetryRollback(globalSession) | |||
} | |||
@@ -501,7 +501,7 @@ func (core *DefaultCore) doGlobalRollback(globalSession *session.GlobalSession, | |||
// failure due to data changes. | |||
gs := holder.GetSessionHolder().RootSessionManager.FindGlobalSession(globalSession.XID) | |||
if gs != nil && gs.HasBranch() { | |||
log.Infof("Global[%d] rollbacking is NOT done.", globalSession.XID) | |||
log.Infof("Global[%d] rolling back is NOT done.", globalSession.XID) | |||
return false, nil | |||
} | |||
} | |||
@@ -576,7 +576,7 @@ func queueToRetryRollback(globalSession *session.GlobalSession) { | |||
func isTimeoutGlobalStatus(status meta.GlobalStatus) bool { | |||
return status == meta.GlobalStatusTimeoutRollbacked || | |||
status == meta.GlobalStatusTimeoutRollbackFailed || | |||
status == meta.GlobalStatusTimeoutRollbacking || | |||
status == meta.GlobalStatusTimeoutRollingBack || | |||
status == meta.GlobalStatusTimeoutRollbackRetrying | |||
} | |||
@@ -6,7 +6,6 @@ import ( | |||
"os" | |||
"os/signal" | |||
"strconv" | |||
"strings" | |||
"syscall" | |||
"time" | |||
) | |||
@@ -18,7 +17,7 @@ import ( | |||
) | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/base/common/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/extension" | |||
"github.com/transaction-wg/seata-golang/pkg/base/getty/readwriter" | |||
"github.com/transaction-wg/seata-golang/pkg/base/registry" | |||
"github.com/transaction-wg/seata-golang/pkg/tc/config" | |||
@@ -26,7 +25,7 @@ import ( | |||
) | |||
type Server struct { | |||
conf config.ServerConfig | |||
conf *config.ServerConfig | |||
tcpServer getty.Server | |||
rpcHandler *DefaultCoordinator | |||
} | |||
@@ -81,18 +80,16 @@ func (s *Server) Start(addr string) { | |||
var ( | |||
tcpServer getty.Server | |||
) | |||
//Binding addr directly includes an IP; if it is 127.0.0.1, the server cannot be reached through the NIC's real IP | |||
addrs := strings.Split(addr, ":") | |||
tcpServer = getty.NewTCPServer( | |||
//getty.WithLocalAddress(addr), | |||
getty.WithLocalAddress(":"+addrs[1]), | |||
getty.WithLocalAddress(addr), | |||
getty.WithServerTaskPool(gxsync.NewTaskPoolSimple(0)), | |||
) | |||
tcpServer.RunEventLoop(s.newSession) | |||
log.Debugf("s bind addr{%s} ok!", addr) | |||
s.tcpServer = tcpServer | |||
//register this instance with the registry center | |||
registryInstance() | |||
registryInstance(s.conf) | |||
c := make(chan os.Signal, 1) | |||
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT) | |||
for { | |||
@@ -110,16 +107,14 @@ func (s *Server) Start(addr string) { | |||
} | |||
} | |||
func registryInstance() { | |||
registryConfig := config.GetRegistryConfig() | |||
reg, err := extension.GetRegistry(registryConfig.Mode) | |||
func registryInstance(config *config.ServerConfig) { | |||
reg, err := extension.GetRegistry(config.RegistryConfig.Mode) | |||
if err != nil { | |||
log.Error("Registry can not connect success, program is going to panic.Error message is %s", err.Error()) | |||
panic(err.Error()) | |||
} | |||
ip, _ := gxnet.GetLocalIP() | |||
conf := config.GetServerConfig() | |||
port, _ := strconv.Atoi(conf.Port) | |||
port, _ := strconv.Atoi(config.Port) | |||
reg.Register(®istry.Address{ | |||
IP: ip, | |||
Port: uint64(port), | |||
@@ -102,7 +102,7 @@ func WithBsApplicationData(applicationData []byte) BranchSessionOption { | |||
func NewBranchSession(opts ...BranchSessionOption) *BranchSession { | |||
session := &BranchSession{ | |||
BranchID: uuid.GeneratorUUID(), | |||
BranchID: uuid.NextID(), | |||
Status: meta.BranchStatusUnknown, | |||
} | |||
for _, o := range opts { | |||
@@ -115,7 +115,7 @@ func NewBranchSessionByGlobal(gs GlobalSession, opts ...BranchSessionOption) *Br | |||
bs := &BranchSession{ | |||
XID: gs.XID, | |||
TransactionID: gs.TransactionID, | |||
BranchID: uuid.GeneratorUUID(), | |||
BranchID: uuid.NextID(), | |||
Status: meta.BranchStatusUnknown, | |||
} | |||
for _, o := range opts { | |||
@@ -29,7 +29,7 @@ func TestBranchSession_Encode_Decode(t *testing.T) { | |||
func branchSessionProvider() *BranchSession { | |||
bs := NewBranchSession( | |||
WithBsTransactionID(uuid.GeneratorUUID()), | |||
WithBsTransactionID(uuid.NextID()), | |||
WithBsBranchID(1), | |||
WithBsResourceGroupID("my_test_tx_group"), | |||
WithBsResourceID("tb_1"), | |||
@@ -106,10 +106,10 @@ func WithGsActive(active bool) GlobalSessionOption { | |||
func NewGlobalSession(opts ...GlobalSessionOption) *GlobalSession { | |||
gs := &GlobalSession{ | |||
BranchSessions: make(map[*BranchSession]bool), | |||
TransactionID: uuid.GeneratorUUID(), | |||
TransactionID: uuid.NextID(), | |||
Active: true, | |||
} | |||
gs.XID = common.GetXID().GenerateXID(gs.TransactionID) | |||
gs.XID = common.GenerateXID(gs.TransactionID) | |||
for _, o := range opts { | |||
o(gs) | |||
} | |||
@@ -30,6 +30,6 @@ func InitServerConfig(t *testing.T) { | |||
_, err = file.Write(bytes) | |||
assert.NoError(t, err, "Write() should success") | |||
err = config.InitConf(file.Name()) | |||
_, err = config.InitConf(file.Name()) | |||
assert.NoError(t, err, "InitConf() should success") | |||
} |
@@ -1,6 +1,11 @@ | |||
package log | |||
import ( | |||
"bytes" | |||
"errors" | |||
"fmt" | |||
"github.com/natefinch/lumberjack" | |||
"go.uber.org/zap" | |||
"go.uber.org/zap/zapcore" | |||
) | |||
@@ -26,6 +31,36 @@ const ( | |||
FatalLevel = LogLevel(zapcore.FatalLevel) | |||
) | |||
func (l *LogLevel) UnmarshalText(text []byte) error { | |||
if l == nil { | |||
return errors.New("can't unmarshal a nil *Level") | |||
} | |||
if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { | |||
return fmt.Errorf("unrecognized level: %q", text) | |||
} | |||
return nil | |||
} | |||
func (l *LogLevel) unmarshalText(text []byte) bool { | |||
switch string(text) { | |||
case "debug", "DEBUG": | |||
*l = DebugLevel | |||
case "info", "INFO", "": // make the zero value useful | |||
*l = InfoLevel | |||
case "warn", "WARN": | |||
*l = WarnLevel | |||
case "error", "ERROR": | |||
*l = ErrorLevel | |||
case "panic", "PANIC": | |||
*l = PanicLevel | |||
case "fatal", "FATAL": | |||
*l = FatalLevel | |||
default: | |||
return false | |||
} | |||
return true | |||
} | |||
type Logger interface { | |||
Debug(v ...interface{}) | |||
Debugf(format string, v ...interface{}) | |||
@@ -71,6 +106,27 @@ func init() { | |||
log = zapLogger.Sugar() | |||
} | |||
func Init(logPath string, level LogLevel) { | |||
lumberJackLogger := &lumberjack.Logger{ | |||
Filename: logPath, | |||
MaxSize: 10, | |||
MaxBackups: 5, | |||
MaxAge: 30, | |||
Compress: false, | |||
} | |||
syncer := zapcore.AddSync(lumberJackLogger) | |||
encoderConfig := zap.NewProductionEncoderConfig() | |||
encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder | |||
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder | |||
encoder := zapcore.NewConsoleEncoder(encoderConfig) | |||
core := zapcore.NewCore(encoder, syncer, zap.NewAtomicLevelAt(zapcore.Level(level))) | |||
zapLogger = zap.New(core, zap.AddCaller()) | |||
log = zapLogger.Sugar() | |||
} | |||
// SetLogger: customize yourself logger. | |||
func SetLogger(logger Logger) { | |||
log = logger | |||
@@ -81,32 +137,6 @@ func GetLogger() Logger { | |||
return log | |||
} | |||
// SetLoggerLevel | |||
func SetLoggerLevel(level LogLevel) error { | |||
var err error | |||
zapLoggerConfig.Level = zap.NewAtomicLevelAt(zapcore.Level(level)) | |||
zapLogger, err = zapLoggerConfig.Build() | |||
if err != nil { | |||
return err | |||
} | |||
log = zapLogger.Sugar() | |||
return nil | |||
} | |||
// SetLoggerCallerDisable: disable caller info in production env for performance improve. | |||
// It is highly recommended that you execute this method in a production environment. | |||
func SetLoggerCallerDisable() error { | |||
var err error | |||
zapLoggerConfig.Development = false | |||
zapLoggerConfig.DisableCaller = true | |||
zapLogger, err = zapLoggerConfig.Build() | |||
if err != nil { | |||
return err | |||
} | |||
log = zapLogger.Sugar() | |||
return nil | |||
} | |||
// Debug ... | |||
func Debug(v ...interface{}) { | |||
log.Debug(v...) | |||
@@ -165,4 +195,4 @@ func Fatal(v ...interface{}) { | |||
// Fatalf ... | |||
func Fatalf(format string, v ...interface{}) { | |||
log.Fatalf(format, v...) | |||
} | |||
} |
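As a usage sketch of the new rotating-file initializer above (the log path below is a placeholder chosen for illustration, not a default shipped by this PR):

```go
package main

import (
	"github.com/transaction-wg/seata-golang/pkg/util/log"
)

func main() {
	// Route subsequent log output to a rotating file; the rotation limits
	// (10 MB per file, 5 backups, 30 days) are hard-coded in log.Init above.
	log.Init("./logs/seata-tc.log", log.InfoLevel)
	log.Infof("transaction coordinator started, logging to %s", "./logs/seata-tc.log")
}
```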
@@ -0,0 +1,209 @@ | |||
package parser | |||
import ( | |||
"fmt" | |||
"os" | |||
"reflect" | |||
"sort" | |||
"strings" | |||
"gopkg.in/yaml.v2" | |||
"github.com/transaction-wg/seata-golang/pkg/util/log" | |||
) | |||
type envVar struct { | |||
name string | |||
value string | |||
} | |||
type envVars []envVar | |||
func (a envVars) Len() int { return len(a) } | |||
func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } | |||
func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } | |||
// Parser can be used to parse a configuration file and environment | |||
// into a unified output structure | |||
type Parser struct { | |||
prefix string | |||
env envVars | |||
} | |||
// NewParser returns a *Parser with the given environment prefix, which is used | |||
// to apply matching environment variables on top of the parsed configuration | |||
func NewParser(prefix string) *Parser { | |||
p := Parser{prefix: prefix} | |||
for _, env := range os.Environ() { | |||
envParts := strings.SplitN(env, "=", 2) | |||
p.env = append(p.env, envVar{envParts[0], envParts[1]}) | |||
} | |||
// We must sort the environment variables lexically by name so that | |||
// more specific variables are applied before less specific ones. This is | |||
// crude, but it's a lot simpler and easier to get right than unmarshalling | |||
// map entries into temporaries and merging with the existing entry. | |||
sort.Sort(p.env) | |||
return &p | |||
} | |||
// Parse reads in the given []byte and environment and writes the resulting | |||
// configuration into the input v | |||
// | |||
// Environment variables may be used to override configuration parameters, | |||
// following the scheme below: | |||
// v.Abc may be replaced by the value of PREFIX_ABC, | |||
// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth | |||
func (p *Parser) Parse(in []byte, v interface{}) error { | |||
err := yaml.Unmarshal(in, v) | |||
if err != nil { | |||
return err | |||
} | |||
for _, envVar := range p.env { | |||
pathStr := envVar.name | |||
if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { | |||
path := strings.Split(pathStr, "_") | |||
err = p.overwriteFields(reflect.ValueOf(v), pathStr, path[1:], envVar.value) | |||
if err != nil { | |||
return err | |||
} | |||
} | |||
} | |||
return nil | |||
} | |||
// overwriteFields replaces configuration values with alternate values specified | |||
// through the environment. Precondition: an empty path slice must never be | |||
// passed in. | |||
func (p *Parser) overwriteFields(v reflect.Value, fullPath string, path []string, payload string) error { | |||
for v.Kind() == reflect.Ptr { | |||
if v.IsNil() { | |||
panic("encountered nil pointer while handling environment variable " + fullPath) | |||
} | |||
v = reflect.Indirect(v) | |||
} | |||
switch v.Kind() { | |||
case reflect.Struct: | |||
return p.overwriteStruct(v, fullPath, path, payload) | |||
case reflect.Map: | |||
return p.overwriteMap(v, fullPath, path, payload) | |||
case reflect.Interface: | |||
if v.NumMethod() == 0 { | |||
if !v.IsNil() { | |||
return p.overwriteFields(v.Elem(), fullPath, path, payload) | |||
} | |||
// Interface was empty; create an implicit map | |||
var template map[string]interface{} | |||
wrappedV := reflect.MakeMap(reflect.TypeOf(template)) | |||
v.Set(wrappedV) | |||
return p.overwriteMap(wrappedV, fullPath, path, payload) | |||
} | |||
} | |||
return nil | |||
} | |||
func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { | |||
// Generate case-insensitive map of struct fields | |||
byUpperCase := make(map[string]int) | |||
for i := 0; i < v.NumField(); i++ { | |||
sf := v.Type().Field(i) | |||
upper := strings.ToUpper(sf.Name) | |||
if _, present := byUpperCase[upper]; present { | |||
panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) | |||
} | |||
byUpperCase[upper] = i | |||
} | |||
fieldIndex, present := byUpperCase[path[0]] | |||
if !present { | |||
log.Warnf("ignoring unrecognized environment variable %s", fullpath) | |||
return nil | |||
} | |||
field := v.Field(fieldIndex) | |||
sf := v.Type().Field(fieldIndex) | |||
if len(path) == 1 { | |||
// Env var specifies this field directly | |||
fieldVal := reflect.New(sf.Type) | |||
err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) | |||
if err != nil { | |||
return err | |||
} | |||
field.Set(reflect.Indirect(fieldVal)) | |||
return nil | |||
} | |||
// If the field is nil, must create an object | |||
switch sf.Type.Kind() { | |||
case reflect.Map: | |||
if field.IsNil() { | |||
field.Set(reflect.MakeMap(sf.Type)) | |||
} | |||
case reflect.Ptr: | |||
if field.IsNil() { | |||
field.Set(reflect.New(field.Type().Elem())) | |||
} | |||
} | |||
err := p.overwriteFields(field, fullpath, path[1:], payload) | |||
if err != nil { | |||
return err | |||
} | |||
return nil | |||
} | |||
func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { | |||
if m.Type().Key().Kind() != reflect.String { | |||
// non-string keys unsupported | |||
log.Warnf("ignoring environment variable %s involving map with non-string keys", fullpath) | |||
return nil | |||
} | |||
if len(path) > 1 { | |||
// If a matching key exists, get its value and continue the | |||
// overwriting process. | |||
for _, k := range m.MapKeys() { | |||
if strings.ToUpper(k.String()) == path[0] { | |||
mapValue := m.MapIndex(k) | |||
// If the existing value is nil, we want to | |||
// recreate it instead of using this value. | |||
if (mapValue.Kind() == reflect.Ptr || | |||
mapValue.Kind() == reflect.Interface || | |||
mapValue.Kind() == reflect.Map) && | |||
mapValue.IsNil() { | |||
break | |||
} | |||
return p.overwriteFields(mapValue, fullpath, path[1:], payload) | |||
} | |||
} | |||
} | |||
// (Re)create this key | |||
var mapValue reflect.Value | |||
if m.Type().Elem().Kind() == reflect.Map { | |||
mapValue = reflect.MakeMap(m.Type().Elem()) | |||
} else { | |||
mapValue = reflect.New(m.Type().Elem()) | |||
} | |||
if len(path) > 1 { | |||
err := p.overwriteFields(mapValue, fullpath, path[1:], payload) | |||
if err != nil { | |||
return err | |||
} | |||
} else { | |||
err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) | |||
if err != nil { | |||
return err | |||
} | |||
} | |||
m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) | |||
return nil | |||
} |
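A hedged usage sketch of the parser above; the struct shape, the "seata" prefix, the SEATA_PORT variable, and the parser's import path are illustrative assumptions, not values taken from this PR:

```go
package main

import (
	"fmt"
	"os"

	// assumed location of the parser package added above
	"github.com/transaction-wg/seata-golang/pkg/tc/config/parser"
)

type demoConfig struct {
	Host string `yaml:"host"`
	Port string `yaml:"port"`
}

func main() {
	// NewParser snapshots the environment, so set the override first.
	// SEATA_PORT follows the PREFIX_FIELD scheme documented on Parse.
	os.Setenv("SEATA_PORT", "8092")

	p := parser.NewParser("seata")
	var cfg demoConfig
	if err := p.Parse([]byte("host: 127.0.0.1\nport: \"8091\"\n"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Host, cfg.Port) // 127.0.0.1 8092
}
```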
@@ -0,0 +1,110 @@ | |||
package uuid | |||
import ( | |||
"fmt" | |||
"math/rand" | |||
"net" | |||
"sync/atomic" | |||
time2 "github.com/transaction-wg/seata-golang/pkg/util/time" | |||
) | |||
const ( | |||
// Start of the custom epoch (2020-05-03) | |||
epoch uint64 = 1588435200000 | |||
// The number of bits occupied by workerID | |||
workerIDBits = 10 | |||
// The number of bits occupied by timestamp | |||
timestampBits = 41 | |||
// The number of bits occupied by sequence | |||
sequenceBits = 12 | |||
// Maximum supported machine id, the result is 1023 | |||
maxWorkerID = -1 ^ (-1 << workerIDBits) | |||
// mask that helps to extract the timestamp and sequence from an ID | |||
timestampAndSequenceMask uint64 = -1 ^ (-1 << (timestampBits + sequenceBits)) | |||
) | |||
// timestamp and sequence mixed into one uint64: | |||
// highest 11 bits: not used | |||
// middle 41 bits: timestamp | |||
// lowest 12 bits: sequence | |||
var timestampAndSequence uint64 = 0 | |||
// business meaning: machine ID (0 ~ 1023) | |||
// actual layout in memory: | |||
// highest 1 bit: 0 | |||
// middle 10 bits: workerID | |||
// lowest 53 bits: all 0 | |||
var workerID = generateWorkerID() << (timestampBits + sequenceBits) | |||
func init() { | |||
timestamp := getNewestTimestamp() | |||
timestampWithSequence := timestamp << sequenceBits | |||
atomic.StoreUint64(×tampAndSequence, timestampWithSequence) | |||
} | |||
func Init(serverNode int64) error { | |||
if serverNode > maxWorkerID || serverNode < 0 { | |||
return fmt.Errorf("worker id can't be greater than %d or less than 0", maxWorkerID) | |||
} | |||
workerID = serverNode << (timestampBits + sequenceBits) | |||
return nil | |||
} | |||
func NextID() int64 { | |||
waitIfNecessary() | |||
next := atomic.AddUint64(×tampAndSequence, 1) | |||
timestampWithSequence := next & timestampAndSequenceMask | |||
return int64(uint64(workerID) | timestampWithSequence) | |||
} | |||
func waitIfNecessary() { | |||
currentWithSequence := atomic.LoadUint64(×tampAndSequence) | |||
current := currentWithSequence >> sequenceBits | |||
newest := getNewestTimestamp() | |||
for current >= newest { | |||
newest = getNewestTimestamp() | |||
} | |||
} | |||
// get the newest timestamp relative to the custom epoch | |||
func getNewestTimestamp() uint64 { | |||
return time2.CurrentTimeMillis() - epoch | |||
} | |||
// auto-generate workerID: try the MAC address first; if that fails, generate one randomly | |||
func generateWorkerID() int64 { | |||
id, err := generateWorkerIDBaseOnMac() | |||
if err != nil { | |||
id = generateRandomWorkerID() | |||
} | |||
return id | |||
} | |||
// use the lowest 10 bits of an available MAC address as workerID | |||
func generateWorkerIDBaseOnMac() (int64, error) { | |||
ifaces, _ := net.Interfaces() | |||
for _, iface := range ifaces { | |||
if iface.Flags&net.FlagUp == 0 { | |||
continue // interface down | |||
} | |||
if iface.Flags&net.FlagLoopback != 0 { | |||
continue // loopback interface | |||
} | |||
mac := iface.HardwareAddr | |||
if len(mac) < 6 { | |||
continue // no usable hardware address | |||
} | |||
return int64(mac[4]&0b11)<<8 | int64(mac[5]), nil | |||
} | |||
return 0, fmt.Errorf("no available mac found") | |||
} | |||
// randomly generate one as workerID | |||
func generateRandomWorkerID() int64 { | |||
return rand.Int63n(maxWorkerID + 1) | |||
} |
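To make the bit layout above concrete, here is a hedged sketch that initializes the generator and decodes one ID back into its worker ID, timestamp, and sequence. The decode masks are derived from the constants above and are illustrative, not an API of this PR; the import path is assumed to be the existing pkg/util/uuid location.

```go
package main

import (
	"fmt"

	"github.com/transaction-wg/seata-golang/pkg/util/uuid"
)

func main() {
	// Fix the worker ID the same way the server does with its serverNode flag.
	if err := uuid.Init(42); err != nil {
		panic(err)
	}

	id := uuid.NextID()

	// Layout: 1 unused sign bit | 10-bit workerID | 41-bit timestamp | 12-bit sequence.
	workerID := (id >> 53) & 0x3FF            // should print 42
	timestamp := (id >> 12) & ((1 << 41) - 1) // milliseconds since 2020-05-03
	sequence := id & 0xFFF

	fmt.Println(workerID, timestamp, sequence)
}
```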
@@ -1,47 +0,0 @@ | |||
package uuid | |||
import ( | |||
"github.com/transaction-wg/seata-golang/pkg/util/time" | |||
"sync/atomic" | |||
) | |||
var ( | |||
UUID int64 = 1000 | |||
serverNodeID = 1 | |||
UUID_INTERNAL int64 = 2000000000 | |||
initUUID int64 = 0 | |||
) | |||
func GeneratorUUID() int64 { | |||
id := atomic.AddInt64(&UUID, 1) | |||
if id >= GetMaxUUID() { | |||
if UUID >= id { | |||
newID := id - UUID_INTERNAL | |||
atomic.CompareAndSwapInt64(&UUID, id, newID) | |||
return newID | |||
} | |||
} | |||
return id | |||
} | |||
func SetUUID(expect int64, update int64) bool { | |||
return atomic.CompareAndSwapInt64(&UUID, expect, update) | |||
} | |||
func GetMaxUUID() int64 { | |||
return UUID_INTERNAL * (int64(serverNodeID) + 1) | |||
} | |||
func GetInitUUID() int64 { | |||
return initUUID | |||
} | |||
func Init(svrNodeID int) { | |||
// 2019-01-01, consistent with the Java version of seata | |||
var base uint64 = 1546272000000 | |||
serverNodeID = svrNodeID | |||
atomic.CompareAndSwapInt64(&UUID, UUID, UUID_INTERNAL*int64(serverNodeID)) | |||
current := time.CurrentTimeMillis() | |||
id := atomic.AddInt64(&UUID, int64((current-base)/time.UnixTimeUnitOffset)) | |||
initUUID = id | |||
} |