Compare commits

...

17 Commits

Author SHA1 Message Date
boyce af15615345 Optimize the log output path 2025-03-14 18:03:01 +08:00
boyce 50dd80b082 Tidy up code 2025-03-10 11:35:19 +08:00
boyce a6487dd41e Switch the default logger to rotatelogs 2025-01-25 00:14:18 +08:00
boyce d5299294d8 Improve logging; add rotatelogs library support 2025-01-25 00:04:31 +08:00
boyce 4d36e525a5 Improve config reading; remove the default cluster directory 2025-01-16 13:45:06 +08:00
duanhf2012 3a4350769c Add etcd authentication configuration 2025-01-08 18:11:20 +08:00
duanhf2012 d4966ea129 Improve the ws module 2024-12-17 14:46:00 +08:00
duanhf2012 3b10eeb792 Improve logging 2024-12-16 18:00:26 +08:00
duanhf2012 e3275e9f2a Improve module release order 2024-12-11 18:31:37 +08:00
duanhf2012 16745b34f0 Improve logging 2024-12-11 17:49:59 +08:00
duanhf2012 f34dc7d53f Improve custom log Writer support 2024-12-11 17:24:06 +08:00
duanhf2012 0a09dc2fee Improve logging 2024-12-11 17:14:29 +08:00
duanhf2012 f01a93c446 Improve logging 2024-12-11 17:03:21 +08:00
duanhf2012 4d2ab4ee4f Improve code 2024-12-11 16:44:09 +08:00
duanhf2012 ffcc5a3489 1. Improve service configuration checks
2. Deprecate the SetGoRoutineNum interface
3. Improve Module release
2024-12-06 16:05:25 +08:00
duanhf2012 cf6ca0483b Merge branch 'v2' of https://github.com/duanhf2012/origin into v2 2024-12-05 10:19:24 +08:00
duanhf2012 97a21e6f71 Add Skip interface 2024-12-05 10:19:15 +08:00
11 changed files with 309 additions and 167 deletions

View File

@@ -40,8 +40,6 @@ type NodeInfo struct {
DiscoveryService []DiscoveryService //Filter the services to be discovered; if not configured, no filtering is applied
status NodeStatus
Retire bool
NetworkName string
}
type NodeRpcInfo struct {

View File

@@ -3,24 +3,23 @@ package cluster
import "github.com/duanhf2012/origin/v2/rpc"
type ConfigDiscovery struct {
funDelNode FunDelNode
funSetNode FunSetNode
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
}
func (discovery *ConfigDiscovery) InitDiscovery(localNodeId string,funDelNode FunDelNode,funSetNode FunSetNode) error{
func (discovery *ConfigDiscovery) InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error {
discovery.localNodeId = localNodeId
discovery.funDelNode = funDelNode
discovery.funSetNode = funSetNode
//Parse the other services' configuration from the local cluster config
_,nodeInfoList,_,err := GetCluster().readLocalClusterConfig(rpc.NodeIdNull)
_, nodeInfoList, _, err := GetCluster().readLocalClusterConfig(rpc.NodeIdNull)
if err != nil {
return err
}
for _,nodeInfo := range nodeInfoList {
for _, nodeInfo := range nodeInfoList {
if nodeInfo.NodeId == localNodeId {
continue
}
@@ -30,5 +29,3 @@ func (discovery *ConfigDiscovery) InitDiscovery(localNodeId string,funDelNode Fu
return nil
}

View File

@@ -5,17 +5,17 @@ import (
"github.com/duanhf2012/origin/v2/service"
)
func (cls *Cluster) setupDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
func (cls *Cluster) setupDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.discoveryInfo.getDiscoveryType() == OriginType { //origin-type service discovery
return cls.setupOriginDiscovery(localNodeId,setupServiceFun)
}else if cls.discoveryInfo.getDiscoveryType() == EtcdType{//etcd-type service discovery
return cls.setupEtcdDiscovery(localNodeId,setupServiceFun)
return cls.setupOriginDiscovery(localNodeId, setupServiceFun)
} else if cls.discoveryInfo.getDiscoveryType() == EtcdType { //etcd-type service discovery
return cls.setupEtcdDiscovery(localNodeId, setupServiceFun)
}
return cls.setupConfigDiscovery(localNodeId,setupServiceFun)
return cls.setupConfigDiscovery(localNodeId, setupServiceFun)
}
func (cls *Cluster) setupOriginDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
func (cls *Cluster) setupOriginDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
@@ -27,6 +27,7 @@ func (cls *Cluster) setupOriginDiscovery(localNodeId string, setupServiceFun Set
}
cls.serviceDiscovery = getOriginDiscovery()
//2. For dynamic service discovery, install the local discovery service
if localMaster == true {
setupServiceFun(&masterService)
@@ -36,11 +37,10 @@ func (cls *Cluster) setupOriginDiscovery(localNodeId string, setupServiceFun Set
setupServiceFun(&clientService)
cls.AddDiscoveryService(OriginDiscoveryClientName, true)
return nil
}
func (cls *Cluster) setupEtcdDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
func (cls *Cluster) setupEtcdDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
@@ -48,12 +48,12 @@ func (cls *Cluster) setupEtcdDiscovery(localNodeId string, setupServiceFun Setup
//setup etcd service
cls.serviceDiscovery = getEtcdDiscovery()
setupServiceFun(cls.serviceDiscovery.(service.IService))
cls.AddDiscoveryService(cls.serviceDiscovery.(service.IService).GetName(),false)
cls.AddDiscoveryService(cls.serviceDiscovery.(service.IService).GetName(), false)
return nil
}
func (cls *Cluster) setupConfigDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
func (cls *Cluster) setupConfigDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}

View File

@@ -1,6 +1,11 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
@@ -9,14 +14,11 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
"google.golang.org/protobuf/proto"
"time"
"context"
"errors"
"fmt"
"go.uber.org/zap"
"os"
"path"
"strings"
"sync/atomic"
"time"
)
const originDir = "/origin"
@@ -40,8 +42,13 @@ type EtcdDiscoveryService struct {
mapDiscoveryNodeId map[string]map[string]struct{} //map[networkName]map[nodeId]
}
var etcdDiscovery *EtcdDiscoveryService
func getEtcdDiscovery() IServiceDiscovery {
etcdDiscovery := &EtcdDiscoveryService{}
if etcdDiscovery == nil {
etcdDiscovery = &EtcdDiscoveryService{}
}
return etcdDiscovery
}
@@ -87,15 +94,43 @@ func (ed *EtcdDiscoveryService) OnInit() error {
}
for i := 0; i < len(etcdDiscoveryCfg.EtcdList); i++ {
client, cerr := clientv3.New(clientv3.Config{
var client *clientv3.Client
var tlsConfig *tls.Config
if etcdDiscoveryCfg.EtcdList[i].Cert != "" {
// load cert
cert, cErr := tls.LoadX509KeyPair(etcdDiscoveryCfg.EtcdList[i].Cert, etcdDiscoveryCfg.EtcdList[i].CertKey)
if cErr != nil {
log.Error("load cert error", log.ErrorField("err", cErr))
return cErr
}
// load root ca
caData, cErr := os.ReadFile(etcdDiscoveryCfg.EtcdList[i].Ca)
if cErr != nil {
log.Error("load root ca error", log.ErrorField("err", cErr))
return cErr
}
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(caData)
tlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: pool,
}
}
client, err = clientv3.New(clientv3.Config{
Endpoints: etcdDiscoveryCfg.EtcdList[i].Endpoints,
DialTimeout: etcdDiscoveryCfg.DialTimeoutMillisecond,
Logger: zap.NewNop(),
Username: etcdDiscoveryCfg.EtcdList[i].UserName,
Password: etcdDiscoveryCfg.EtcdList[i].Password,
Logger: log.GetLogger().Logger,
TLS: tlsConfig,
})
if cerr != nil {
log.Error("etcd discovery init fail", log.ErrorField("err", cerr))
return cerr
if err != nil {
log.Error("etcd discovery init fail", log.ErrorField("err", err))
return err
}
ctx, _ := context.WithTimeout(context.Background(), time.Second*3)
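The hunk above wires username/password and certificate-based authentication into the etcd client (commit 3a4350769c). For reference, a minimal self-contained sketch of the same client setup outside the framework, assuming only the etcd clientv3 API already imported in the diff; every endpoint, path, and credential below is a placeholder:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"os"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// newAuthedEtcdClient mirrors how the new EtcdList Cert/CertKey/Ca and
// UserName/Password fields are applied when creating the client.
func newAuthedEtcdClient() (*clientv3.Client, error) {
	// Client certificate and key (mutual TLS), as loaded from Cert/CertKey.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		return nil, err
	}
	// Root CA, as loaded from Ca.
	caData, err := os.ReadFile("ca.crt")
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caData)

	return clientv3.New(clientv3.Config{
		Endpoints:   []string{"https://127.0.0.1:2379"},
		DialTimeout: 3 * time.Second,
		Username:    "origin", // UserName
		Password:    "secret", // Password
		TLS: &tls.Config{
			Certificates: []tls.Certificate{cert},
			RootCAs:      pool,
		},
	})
}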

View File

@@ -7,6 +7,7 @@ import (
"github.com/duanhf2012/origin/v2/rpc"
jsoniter "github.com/json-iterator/go"
"gopkg.in/yaml.v3"
"io/fs"
"os"
"path/filepath"
"strings"
@@ -15,9 +16,15 @@ import (
var json = jsoniter.ConfigCompatibleWithStandardLibrary
type EtcdList struct {
NetworkName []string
Endpoints []string
UserName string
Password string
Cert string
CertKey string
Ca string
}
type EtcdDiscovery struct {
@@ -64,12 +71,8 @@ type NodeInfoList struct {
NodeList []NodeInfo
}
func validConfigFile(f os.DirEntry) bool {
if f.IsDir() == true || (filepath.Ext(f.Name()) != ".json" && filepath.Ext(f.Name()) != ".yml" && filepath.Ext(f.Name()) != ".yaml") {
return false
}
return true
func validConfigFile(f string) bool {
return strings.HasSuffix(f, ".json")|| strings.HasSuffix(f, ".yml") || strings.HasSuffix(f, ".yaml")
}
func yamlToJson(data []byte, v interface{}) ([]byte, error) {
@@ -271,32 +274,33 @@ func (cls *Cluster) readLocalClusterConfig(nodeId string) (DiscoveryInfo, []Node
var discoveryInfo DiscoveryInfo
var rpcMode RpcMode
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return discoveryInfo, nil, rpcMode, fmt.Errorf("read dir %s is fail :%+v", clusterCfgPath, err)
}
//Scan every file, but only load configs in a supported format; files in the directory may be split up freely
for _, f := range fileInfoList {
if !validConfigFile(f) {
continue
err := filepath.Walk(configDir, func(path string, info fs.FileInfo, err error)error {
if info.IsDir() {
return nil
}
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
fileNodeInfoList, rErr := cls.ReadClusterConfig(filePath)
if err != nil {
return err
}
if !validConfigFile(info.Name()) {
return nil
}
fileNodeInfoList, rErr := cls.ReadClusterConfig(path)
if rErr != nil {
return discoveryInfo, nil, rpcMode, fmt.Errorf("read file path %s is error:%+v", filePath, rErr)
return fmt.Errorf("read file path %s is error:%+v", path, rErr)
}
err = cls.SetRpcMode(&fileNodeInfoList.RpcMode, &rpcMode)
if err != nil {
return discoveryInfo, nil, rpcMode, err
return err
}
err = discoveryInfo.setDiscovery(&fileNodeInfoList.Discovery)
if err != nil {
return discoveryInfo, nil, rpcMode, err
return err
}
for _, nodeInfo := range fileNodeInfoList.NodeList {
@@ -304,6 +308,12 @@ func (cls *Cluster) readLocalClusterConfig(nodeId string) (DiscoveryInfo, []Node
nodeInfoList = append(nodeInfoList, nodeInfo)
}
}
return nil
})
if err != nil {
return discoveryInfo, nil, rpcMode, err
}
if nodeId != rpc.NodeIdNull && (len(nodeInfoList) != 1) {
@@ -325,32 +335,32 @@ func (cls *Cluster) readLocalClusterConfig(nodeId string) (DiscoveryInfo, []Node
}
func (cls *Cluster) readLocalService(localNodeId string) error {
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return fmt.Errorf("read dir %s is fail :%+v", clusterCfgPath, err)
}
var globalCfg interface{}
publicService := map[string]interface{}{}
nodeService := map[string]interface{}{}
//Scan every file, but only load configs in a supported format; files in the directory may be split up freely
for _, f := range fileInfoList {
if !validConfigFile(f) {
continue
err := filepath.Walk(configDir, func(path string, info fs.FileInfo, err error)error{
if info.IsDir() {
return nil
}
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
currGlobalCfg, serviceConfig, mapNodeService, err := cls.readServiceConfig(filePath)
if err != nil {
continue
return err
}
if !validConfigFile(info.Name()) {
return nil
}
currGlobalCfg, serviceConfig, mapNodeService, err := cls.readServiceConfig(path)
if err != nil {
return err
}
if currGlobalCfg != nil {
//Duplicate [Global] configuration is not allowed
if globalCfg != nil {
return fmt.Errorf("[Global] does not allow repeated configuration in %s", f.Name())
return fmt.Errorf("[Global] does not allow repeated configuration in %s", info.Name())
}
globalCfg = currGlobalCfg
}
@@ -366,7 +376,7 @@ func (cls *Cluster) readLocalService(localNodeId string) error {
pubCfg, ok := serviceConfig[s]
if ok == true {
if _, publicOk := publicService[s]; publicOk == true {
return fmt.Errorf("public service [%s] does not allow repeated configuration in %s", s, f.Name())
return fmt.Errorf("public service [%s] does not allow repeated configuration in %s", s, info.Name())
}
publicService[s] = pubCfg
}
@@ -382,12 +392,17 @@ func (cls *Cluster) readLocalService(localNodeId string) error {
}
if _, nodeOK := nodeService[s]; nodeOK == true {
return fmt.Errorf("NodeService NodeId[%s] Service[%s] does not allow repeated configuration in %s", cls.localNodeInfo.NodeId, s, f.Name())
return fmt.Errorf("NodeService NodeId[%s] Service[%s] does not allow repeated configuration in %s", cls.localNodeInfo.NodeId, s, info.Name())
}
nodeService[s] = nodeCfg
break
}
}
return nil
})
if err != nil {
return err
}
//Combine all configurations
@@ -417,13 +432,12 @@ func (cls *Cluster) readLocalService(localNodeId string) error {
return nil
}
func (cls *Cluster) parseLocalCfg() {
func (cls *Cluster) parseLocalCfg() error{
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = cls.localNodeInfo
rpcInfo.client = rpc.NewLClient(rpcInfo.nodeInfo.NodeId, &cls.callSet)
cls.mapRpc[cls.localNodeInfo.NodeId] = &rpcInfo
for _, serviceName := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(serviceName, ":")
if len(splitServiceName) == 2 {
@@ -440,8 +454,13 @@ func (cls *Cluster) parseLocalCfg() {
cls.mapServiceNode[serviceName] = make(map[string]struct{})
}
if _,ok:=cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId];ok {
return fmt.Errorf("duplicate service %s is configured in node %s", serviceName, cls.localNodeInfo.NodeId)
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
return nil
}
func (cls *Cluster) IsNatsMode() bool {
@@ -474,8 +493,7 @@ func (cls *Cluster) InitCfg(localNodeId string) error {
}
//Add locally configured services to the global map
cls.parseLocalCfg()
return nil
return cls.parseLocalCfg()
}
func (cls *Cluster) IsConfigService(serviceName string) bool {
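The config loader above now walks the whole config directory with filepath.Walk instead of reading only the fixed cluster/ subdirectory, keeping every .json/.yml/.yaml file it finds (commit 4d36e525a5). A minimal sketch of that scan pattern, with illustrative names:

package main

import (
	"io/fs"
	"path/filepath"
	"strings"
)

// isClusterConfig mirrors validConfigFile: only JSON and YAML files count.
func isClusterConfig(name string) bool {
	return strings.HasSuffix(name, ".json") ||
		strings.HasSuffix(name, ".yml") ||
		strings.HasSuffix(name, ".yaml")
}

// collectClusterConfigs walks configDir recursively and returns the matching
// files, however they are split across subdirectories.
func collectClusterConfigs(configDir string) ([]string, error) {
	var files []string
	err := filepath.Walk(configDir, func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() || !isClusterConfig(info.Name()) {
			return nil
		}
		files = append(files, path)
		return nil
	})
	return files, err
}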

View File

@@ -1,36 +1,53 @@
package log
import (
"github.com/duanhf2012/rotatelogs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
"os"
"path/filepath"
"strings"
"time"
)
var isSetLogger bool
var gLogger = NewDefaultLogger()
var LogLevel zapcore.Level
var MaxSize int
var LogPath string
var OpenConsole *bool
var LogChanLen int
type Logger struct {
*zap.Logger
stack bool
OpenConsole *bool
LogPath string
FileName string
LogLevel zapcore.Level
Encoder zapcore.Encoder
LogConfig *lumberjack.Logger
sugaredLogger *zap.SugaredLogger
FileName string
Skip int
Encoder zapcore.Encoder
SugaredLogger *zap.SugaredLogger
CoreList []zapcore.Core
WriteSyncerFun []func() zapcore.WriteSyncer
}
// Set the Logger
func SetLogger(logger *Logger) {
if logger != nil && isSetLogger == false {
if logger != nil {
gLogger = logger
isSetLogger = true
}
}
// Set the zap.Logger
func SetZapLogger(zapLogger *zap.Logger) {
if zapLogger != nil {
gLogger = &Logger{}
gLogger.Logger = zapLogger
isSetLogger = true
}
}
func GetLogger() *Logger {
return gLogger
}
@@ -39,10 +56,14 @@ func (logger *Logger) SetEncoder(encoder zapcore.Encoder) {
logger.Encoder = encoder
}
func (logger *Logger) SetSkip(skip int) {
logger.Skip = skip
}
func GetJsonEncoder() zapcore.Encoder {
encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
encoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
encoderConfig.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
enc.AppendString(t.Format("2006-01-02 15:04:05.000"))
}
@@ -61,51 +82,105 @@ func GetTxtEncoder() zapcore.Encoder {
return zapcore.NewConsoleEncoder(encoderConfig)
}
func getLogConfig() *lumberjack.Logger {
func (logger *Logger) getLogConfig() *lumberjack.Logger {
return &lumberjack.Logger{
Filename: "",
MaxSize: 2048,
Filename: filepath.Join(LogPath, logger.FileName),
MaxSize: MaxSize,
MaxBackups: 0,
MaxAge: 0,
Compress: false,
LocalTime: true,
}
}
func NewDefaultLogger() *Logger {
logger := Logger{}
logger.Encoder = GetJsonEncoder()
logger.LogConfig = getLogConfig()
logger.LogConfig.LocalTime = true
core := zapcore.NewCore(logger.Encoder, zapcore.AddSync(os.Stdout), zap.InfoLevel)
logger.Logger = zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
logger.Init()
return &logger
}
func (logger *Logger) SetLogLevel(level zapcore.Level) {
logger.LogLevel = level
func (logger *Logger) SetSyncers(syncers ...func() zapcore.WriteSyncer) {
logger.WriteSyncerFun = syncers
}
func (logger *Logger) AppendSyncerFun(syncerFun func() zapcore.WriteSyncer) {
logger.WriteSyncerFun = append(logger.WriteSyncerFun, syncerFun)
}
func SetLogLevel(level zapcore.Level) {
LogLevel = level
}
func (logger *Logger) Enabled(zapcore.Level) bool {
return logger.stack
}
func (logger *Logger) Init() {
var coreList []zapcore.Core
func (logger *Logger) NewLumberjackWriter() zapcore.WriteSyncer {
return zapcore.AddSync(
&lumberjack.Logger{
Filename: filepath.Join(LogPath, logger.FileName),
MaxSize: MaxSize,
MaxBackups: 0,
MaxAge: 0,
Compress: false,
LocalTime: true,
})
}
if logger.OpenConsole == nil || *logger.OpenConsole {
core := zapcore.NewCore(logger.Encoder, zapcore.AddSync(os.Stdout), logger.LogLevel)
func (logger *Logger) NewRotatelogsWriter() zapcore.WriteSyncer {
var options []rotatelogs.Option
if MaxSize > 0 {
options = append(options, rotatelogs.WithRotateMaxSize(int64(MaxSize)))
}
if LogChanLen > 0 {
options = append(options, rotatelogs.WithChannelLen(LogChanLen))
}
options = append(options, rotatelogs.WithRotationTime(time.Hour*24))
fileName := strings.TrimRight(logger.FileName, filepath.Ext(logger.FileName))
rotateLogs, err := rotatelogs.NewRotateLogs(LogPath, "20060102/"+fileName+"_20060102_150405", options...)
if err != nil {
panic(err)
}
return zapcore.AddSync(rotateLogs)
}
func (logger *Logger) Init() {
if isSetLogger {
return
}
var syncerList []zapcore.WriteSyncer
if logger.WriteSyncerFun == nil {
syncerList = append(syncerList, logger.NewRotatelogsWriter())
} else {
for _, syncer := range logger.WriteSyncerFun {
syncerList = append(syncerList, syncer())
}
}
var coreList []zapcore.Core
if OpenConsole == nil || *OpenConsole {
syncerList = append(syncerList, zapcore.AddSync(os.Stdout))
}
for _, writer := range syncerList {
core := zapcore.NewCore(logger.Encoder, writer, LogLevel)
coreList = append(coreList, core)
}
if logger.LogPath != "" {
writeSyncer := zapcore.AddSync(logger.LogConfig)
core := zapcore.NewCore(logger.Encoder, writeSyncer, logger.LogLevel)
coreList = append(coreList, core)
if logger.CoreList != nil {
coreList = append(coreList, logger.CoreList...)
}
core := zapcore.NewTee(coreList...)
logger.Logger = zap.New(core, zap.AddCaller(), zap.AddStacktrace(logger), zap.AddCallerSkip(1))
logger.sugaredLogger = logger.Logger.Sugar()
logger.Logger = zap.New(core, zap.AddCaller(), zap.AddStacktrace(logger), zap.AddCallerSkip(1+logger.Skip))
logger.SugaredLogger = logger.Logger.Sugar()
}
func (logger *Logger) Debug(msg string, fields ...zap.Field) {
@@ -165,84 +240,84 @@ func Fatal(msg string, fields ...zap.Field) {
}
func Debugf(template string, args ...any) {
gLogger.sugaredLogger.Debugf(template, args...)
gLogger.SugaredLogger.Debugf(template, args...)
}
func Infof(template string, args ...any) {
gLogger.sugaredLogger.Infof(template, args...)
gLogger.SugaredLogger.Infof(template, args...)
}
func Warnf(template string, args ...any) {
gLogger.sugaredLogger.Warnf(template, args...)
gLogger.SugaredLogger.Warnf(template, args...)
}
func Errorf(template string, args ...any) {
gLogger.sugaredLogger.Errorf(template, args...)
gLogger.SugaredLogger.Errorf(template, args...)
}
func StackErrorf(template string, args ...any) {
gLogger.stack = true
gLogger.sugaredLogger.Errorf(template, args...)
gLogger.SugaredLogger.Errorf(template, args...)
gLogger.stack = false
}
func Fatalf(template string, args ...any) {
gLogger.sugaredLogger.Fatalf(template, args...)
gLogger.SugaredLogger.Fatalf(template, args...)
}
func (logger *Logger) SDebug(args ...interface{}) {
logger.sugaredLogger.Debugln(args...)
logger.SugaredLogger.Debugln(args...)
}
func (logger *Logger) SInfo(args ...interface{}) {
logger.sugaredLogger.Infoln(args...)
logger.SugaredLogger.Infoln(args...)
}
func (logger *Logger) SWarn(args ...interface{}) {
logger.sugaredLogger.Warnln(args...)
logger.SugaredLogger.Warnln(args...)
}
func (logger *Logger) SError(args ...interface{}) {
logger.sugaredLogger.Errorln(args...)
logger.SugaredLogger.Errorln(args...)
}
func (logger *Logger) SStackError(args ...interface{}) {
gLogger.stack = true
logger.sugaredLogger.Errorln(args...)
logger.SugaredLogger.Errorln(args...)
gLogger.stack = false
}
func (logger *Logger) SFatal(args ...interface{}) {
gLogger.stack = true
logger.sugaredLogger.Fatalln(args...)
logger.SugaredLogger.Fatalln(args...)
gLogger.stack = false
}
func SDebug(args ...interface{}) {
gLogger.sugaredLogger.Debugln(args...)
gLogger.SugaredLogger.Debugln(args...)
}
func SInfo(args ...interface{}) {
gLogger.sugaredLogger.Infoln(args...)
gLogger.SugaredLogger.Infoln(args...)
}
func SWarn(args ...interface{}) {
gLogger.sugaredLogger.Warnln(args...)
gLogger.SugaredLogger.Warnln(args...)
}
func SError(args ...interface{}) {
gLogger.sugaredLogger.Errorln(args...)
gLogger.SugaredLogger.Errorln(args...)
}
func SStackError(args ...interface{}) {
gLogger.stack = true
gLogger.sugaredLogger.Errorln(args...)
gLogger.SugaredLogger.Errorln(args...)
gLogger.stack = false
}
func SFatal(args ...interface{}) {
gLogger.stack = true
gLogger.sugaredLogger.Fatalln(args...)
gLogger.SugaredLogger.Fatalln(args...)
gLogger.stack = false
}
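The logging rework above moves level, path, size, console, and channel-length settings to package-level variables, exposes SugaredLogger, and lets callers register their own write syncers; when none are registered, the new rotatelogs writer is the default sink. A usage sketch, assuming only the log API shown in this diff (how and when Init is normally invoked by the node bootstrap is framework-dependent, and the lumberjack sink below is just one illustrative choice):

package main

import (
	"github.com/duanhf2012/origin/v2/log"
	"go.uber.org/zap/zapcore"
	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Package-level settings replace the old per-Logger fields.
	log.LogPath = "./log"
	log.MaxSize = 512 // MB
	log.SetLogLevel(zapcore.InfoLevel)

	logger := log.GetLogger()
	// Registering a custom syncer replaces the default rotatelogs writer.
	logger.AppendSyncerFun(func() zapcore.WriteSyncer {
		return zapcore.AddSync(&lumberjack.Logger{
			Filename:  "./log/custom.log", // placeholder path
			MaxSize:   512,
			LocalTime: true,
		})
	})
	logger.Init() // rebuild the zap cores with the configured syncers

	log.SInfo("logger ready")
}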

View File

@@ -17,7 +17,6 @@ import (
_ "net/http/pprof"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
@@ -59,6 +58,7 @@ func init() {
console.RegisterCommandString("loglevel", "debug", "<-loglevel debug|info|warn|error|stackerror|fatal> Set loglevel.", setLevel)
console.RegisterCommandString("logpath", "", "<-logpath path> Set log file path.", setLogPath)
console.RegisterCommandInt("logsize", 0, "<-logsize size> Set log size(MB).", setLogSize)
console.RegisterCommandInt("logchanlen", 0, "<-logchanlen len> Set log channel len.", setLogChanLen)
console.RegisterCommandString("pprof", "", "<-pprof ip:port> Open performance analysis.", setPprof)
}
@@ -220,7 +220,7 @@ func initNode(id string) {
func initLog() error {
logger := log.GetLogger()
if logger.LogPath == "" {
if log.LogPath == "" {
err := setLogPath("./log")
if err != nil {
return err
@@ -230,8 +230,6 @@ func initLog() error {
localNodeInfo := cluster.GetCluster().GetLocalNodeInfo()
fileName := fmt.Sprintf("%s.log", localNodeInfo.NodeId)
logger.FileName = fileName
filepath.Join()
logger.LogConfig.Filename = filepath.Join(logger.LogPath, logger.FileName)
logger.Init()
return nil
@@ -441,10 +439,10 @@ func openConsole(args interface{}) error {
strOpen := strings.ToLower(strings.TrimSpace(args.(string)))
if strOpen == "false" {
bOpenConsole := false
log.GetLogger().OpenConsole = &bOpenConsole
log.OpenConsole = &bOpenConsole
} else if strOpen == "true" {
bOpenConsole := true
log.GetLogger().OpenConsole = &bOpenConsole
log.OpenConsole = &bOpenConsole
} else {
return errors.New("parameter console error")
}
@@ -459,17 +457,17 @@ func setLevel(args interface{}) error {
strlogLevel := strings.TrimSpace(args.(string))
switch strlogLevel {
case "debug":
log.GetLogger().LogLevel = zapcore.DebugLevel
log.LogLevel = zapcore.DebugLevel
case "info":
log.GetLogger().LogLevel = zapcore.InfoLevel
log.LogLevel = zapcore.InfoLevel
case "warn":
log.GetLogger().LogLevel = zapcore.WarnLevel
log.LogLevel = zapcore.WarnLevel
case "error":
log.GetLogger().LogLevel = zapcore.ErrorLevel
log.LogLevel = zapcore.ErrorLevel
case "stackerror":
log.GetLogger().LogLevel = zapcore.ErrorLevel
log.LogLevel = zapcore.ErrorLevel
case "fatal":
log.GetLogger().LogLevel = zapcore.FatalLevel
log.LogLevel = zapcore.FatalLevel
default:
return errors.New("unknown level: " + strlogLevel)
}
@@ -481,19 +479,15 @@ func setLogPath(args interface{}) error {
return nil
}
logPath := strings.TrimSpace(args.(string))
dir, err := os.Stat(logPath)
if err == nil && dir.IsDir() == false {
return errors.New("Not found dir " + logPath)
}
_, err := os.Stat(logPath)
if err != nil {
err = os.Mkdir(log.GetLogger().LogPath, os.ModePerm)
err = os.MkdirAll(logPath, os.ModePerm)
if err != nil {
return errors.New("Cannot create dir " + log.GetLogger().LogPath)
return errors.New("Cannot create dir " + logPath)
}
}
log.GetLogger().LogPath = logPath
log.LogPath = logPath
return nil
}
@@ -506,7 +500,20 @@ func setLogSize(args interface{}) error {
return nil
}
log.GetLogger().LogConfig.MaxSize = logSize
log.MaxSize = logSize
return nil
}
func setLogChanLen(args interface{}) error {
logChanLen, ok := args.(int)
if ok == false {
return errors.New("param logsize is error")
}
if logChanLen == 0 {
return nil
}
log.LogChanLen = logChanLen
return nil
}

View File

@@ -11,6 +11,7 @@ import (
"github.com/duanhf2012/origin/v2/log"
rpcHandle "github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/timer"
"slices"
)
const InitModuleId = 1e9
@@ -46,7 +47,7 @@ type Module struct {
moduleName string //module name
parent IModule //parent module
self IModule //self
child map[uint32]IModule //child modules
child []IModule //child modules
mapActiveTimer map[timer.ITimer]struct{}
mapActiveIdTimer map[uint64]timer.ITimer
dispatcher *timer.Dispatcher //timer
@@ -93,10 +94,7 @@ func (m *Module) AddModule(module IModule) (uint32, error) {
pAddModule.moduleId = m.NewModuleId()
}
if m.child == nil {
m.child = map[uint32]IModule{}
}
_, ok := m.child[module.GetModuleId()]
_,ok := m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()]
if ok == true {
return 0, fmt.Errorf("exists module id %d", module.GetModuleId())
}
@@ -109,29 +107,33 @@ func (m *Module) AddModule(module IModule) (uint32, error) {
pAddModule.eventHandler = event.NewEventHandler()
pAddModule.eventHandler.Init(m.eventHandler.GetEventProcessor())
pAddModule.IConcurrent = m.IConcurrent
m.child = append(m.child,module)
m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()] = module
err := module.OnInit()
if err != nil {
delete(m.ancestor.getBaseModule().(*Module).descendants, module.GetModuleId())
m.child = m.child[:len(m.child)-1]
log.Error("module OnInit error",log.String("ModuleName",module.GetModuleName()),log.ErrorField("err",err))
return 0, err
}
m.child[module.GetModuleId()] = module
m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()] = module
log.Debug("Add module " + module.GetModuleName() + " completed")
return module.GetModuleId(), nil
}
func (m *Module) ReleaseModule(moduleId uint32) {
pModule := m.GetModule(moduleId).getBaseModule().(*Module)
pModule.self.OnRelease()
log.Debug("Release module " + pModule.GetModuleName())
//Release descendants
for id := range pModule.child {
m.ReleaseModule(id)
for i:=len(pModule.child)-1; i>=0; i-- {
m.ReleaseModule(pModule.child[i].GetModuleId())
}
pModule.self.OnRelease()
pModule.GetEventHandler().Destroy()
log.Debug("Release module " + pModule.GetModuleName())
for pTimer := range pModule.mapActiveTimer {
pTimer.Cancel()
}
@@ -140,7 +142,10 @@ func (m *Module) ReleaseModule(moduleId uint32) {
t.Cancel()
}
delete(m.child, moduleId)
m.child = slices.DeleteFunc(m.child, func(module IModule) bool {
return module.GetModuleId() == moduleId
})
delete(m.ancestor.getBaseModule().(*Module).descendants, moduleId)
//Clean up the removed Module
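With child now an insertion-ordered slice, ReleaseModule tears down descendants last-in-first-out before calling the parent's own OnRelease (commit e3275e9f2a). A framework-independent toy sketch of that release order (names are illustrative, not the origin API):

package main

import "fmt"

type node struct {
	name     string
	children []*node // insertion-ordered, like the new child slice
}

func (n *node) add(child *node) {
	n.children = append(n.children, child)
}

func (n *node) release() {
	// Release descendants in reverse insertion order before the node itself.
	for i := len(n.children) - 1; i >= 0; i-- {
		n.children[i].release()
	}
	n.children = nil
	fmt.Println("released", n.name)
}

func main() {
	root := &node{name: "root"}
	root.add(&node{name: "childA"})
	root.add(&node{name: "childB"})
	root.release() // prints childB, childA, root
}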

View File

@@ -192,7 +192,7 @@ func (s *Service) run() {
break
}
if s.profiler != nil {
analyzer = s.profiler.Push("[Req]" + rpcRequest.RpcRequestData.GetServiceMethod())
analyzer = s.profiler.Push("[RpcReq]" + rpcRequest.RpcRequestData.GetServiceMethod()+"."+strconv.Itoa(int(rpcRequest.RpcRequestData.GetRpcMethodId())))
}
s.GetRpcHandler().HandlerRpcRequest(rpcRequest)
@@ -266,8 +266,10 @@ func (s *Service) Release() {
if atomic.AddInt32(&s.isRelease, -1) == -1 {
s.self.OnRelease()
for i:=len(s.child)-1; i>=0; i-- {
s.ReleaseModule(s.child[i].GetModuleId())
}
}
}
func (s *Service) OnRelease() {
@@ -432,6 +434,7 @@ func (s *Service) SetEventChannelNum(num int) {
}
}
// Deprecated: replace it with the OpenConcurrent function
func (s *Service) SetGoRoutineNum(goroutineNum int32) bool {
//The goroutine count cannot be changed once the service has started, and multithreading is not allowed while the profiler is open
if s.startStatus == true || s.profiler != nil {

View File

@@ -14,7 +14,7 @@ import (
type WSModule struct {
service.Module
wsServer network.WSServer
WSServer network.WSServer
mapClientLocker sync.RWMutex
mapClient map[string]*WSClient
@@ -57,16 +57,16 @@ func (ws *WSModule) OnInit() error {
return fmt.Errorf("please call the Init function correctly")
}
ws.wsServer.MaxConnNum = ws.wsCfg.MaxConnNum
ws.wsServer.PendingWriteNum = ws.wsCfg.PendingWriteNum
ws.wsServer.MaxMsgLen = ws.wsCfg.MaxMsgLen
ws.wsServer.Addr = ws.wsCfg.ListenAddr
ws.WSServer.MaxConnNum = ws.wsCfg.MaxConnNum
ws.WSServer.PendingWriteNum = ws.wsCfg.PendingWriteNum
ws.WSServer.MaxMsgLen = ws.wsCfg.MaxMsgLen
ws.WSServer.Addr = ws.wsCfg.ListenAddr
//3. Set the parsing processor
ws.process.SetByteOrder(ws.wsCfg.LittleEndian)
ws.mapClient = make(map[string]*WSClient, ws.wsServer.MaxConnNum)
ws.wsServer.NewAgent = ws.NewWSClient
ws.mapClient = make(map[string]*WSClient, ws.WSServer.MaxConnNum)
ws.WSServer.NewAgent = ws.NewWSClient
//4. Set up network event handling
ws.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_WebSocket, ws.GetEventHandler(), ws.wsEventHandler)
@@ -80,7 +80,7 @@ func (ws *WSModule) Init(wsCfg *WSCfg, process processor.IRawProcessor) {
}
func (ws *WSModule) Start() error {
return ws.wsServer.Start()
return ws.WSServer.Start()
}
func (ws *WSModule) wsEventHandler(ev event.IEvent) {
@@ -197,3 +197,7 @@ func (ws *WSModule) SendRawMsg(clientId string, msg []byte) error {
ws.mapClientLocker.Unlock()
return client.wsConn.WriteMsg(msg)
}
func (ws *WSModule) SetMessageType(messageType int) {
ws.WSServer.SetMessageType(messageType)
}

View File

@@ -32,10 +32,10 @@ func Abs[NumType typ.Signed | typ.Float](Num NumType) NumType {
func AddSafe[NumType typ.Number](number1 NumType, number2 NumType) (NumType, bool) {
ret := number1 + number2
if number2 > 0 && ret < number1 {
log.Stack("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
log.SStackError("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
return ret, false
} else if number2 < 0 && ret > number1 {
log.Stack("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
log.SStackError("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
return ret, false
}
@@ -45,10 +45,10 @@ func AddSafe[NumType typ.Number](number1 NumType, number2 NumType) (NumType, boo
func SubSafe[NumType typ.Number](number1 NumType, number2 NumType) (NumType, bool) {
ret := number1 - number2
if number2 > 0 && ret > number1 {
log.Stack("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
log.SStackError("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
return ret, false
} else if number2 < 0 && ret < number1 {
log.Stack("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
log.SStackError("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
return ret, false
}
@@ -65,7 +65,7 @@ func MulSafe[NumType typ.Number](number1 NumType, number2 NumType) (NumType, boo
return ret, true
}
log.Stack("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
log.SStackError("Calculation overflow", log.Any("number1", number1), log.Any("number2", number2))
return ret, true
}
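AddSafe/SubSafe/MulSafe keep their wrap-around checks and now report overflow through SStackError. A framework-independent sketch of the same check for signed addition (int32 for concreteness; names are illustrative):

package main

import "fmt"

// addSafe returns the wrapped sum and false when the addition overflows,
// mirroring the AddSafe check above.
func addSafe(a, b int32) (int32, bool) {
	sum := a + b
	if (b > 0 && sum < a) || (b < 0 && sum > a) {
		return sum, false
	}
	return sum, true
}

func main() {
	if v, ok := addSafe(2147483000, 1000); !ok {
		fmt.Println("overflow detected, wrapped value:", v)
	}
}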