Compare commits

...

102 Commits

Author SHA1 Message Date
boyce
af15615345 Improve the log output path 2025-03-14 18:03:01 +08:00
boyce
50dd80b082 Clean up code 2025-03-10 11:35:19 +08:00
boyce
a6487dd41e Switch the default logging to rotatelogs 2025-01-25 00:14:18 +08:00
boyce
d5299294d8 Improve logging; add rotatelogs library support 2025-01-25 00:04:31 +08:00
boyce
4d36e525a5 Improve config loading; remove the default cluster directory 2025-01-16 13:45:06 +08:00
duanhf2012
3a4350769c Add etcd authentication config 2025-01-08 18:11:20 +08:00
duanhf2012
d4966ea129 Improve the ws module 2024-12-17 14:46:00 +08:00
duanhf2012
3b10eeb792 Improve logging 2024-12-16 18:00:26 +08:00
duanhf2012
e3275e9f2a Improve module release order 2024-12-11 18:31:37 +08:00
duanhf2012
16745b34f0 Improve logging 2024-12-11 17:49:59 +08:00
duanhf2012
f34dc7d53f Improve the custom log Writer 2024-12-11 17:24:06 +08:00
duanhf2012
0a09dc2fee Improve logging 2024-12-11 17:14:29 +08:00
duanhf2012
f01a93c446 Improve logging 2024-12-11 17:03:21 +08:00
duanhf2012
4d2ab4ee4f Improve code 2024-12-11 16:44:09 +08:00
duanhf2012
ffcc5a3489 1. Improve service config checks
2. Deprecate the SetGoRoutineNum interface
3. Improve Module release
2024-12-06 16:05:25 +08:00
duanhf2012
cf6ca0483b Merge branch 'v2' of https://github.com/duanhf2012/origin into v2 2024-12-05 10:19:24 +08:00
duanhf2012
97a21e6f71 Add Skip interface 2024-12-05 10:19:15 +08:00
duanhf2012
f60a55d03a Improve async RPC; remove the error return value 2024-12-04 18:33:53 +08:00
duanhf2012
2c32d6eec9 RPC and logging improvements 2024-12-03 17:21:21 +08:00
duanhf2012
da45f97fa8 Improve logging 2024-11-29 15:55:35 +08:00
duanhf2012
d29abc0813 Add environment variable support for config files 2024-11-29 14:39:04 +08:00
duanhf2012
c9507f9ee9 Replace slog logging with zap 2024-11-29 13:47:51 +08:00
duanhf2012
61de4bba3a Replace slog logging with zap 2024-11-29 13:47:27 +08:00
duanhf2012
000853b479 Network library improvements 2024-11-25 17:44:08 +08:00
duanhf2012
387e83d65c Deprecate some timer interfaces 2024-10-12 17:49:59 +08:00
duanhf2012
07a102c6ea Add publish/subscribe mode 2024-10-11 09:00:12 +08:00
duanhf2012
e9bbf5b592 Improve config loading 2024-10-10 09:11:29 +08:00
duanhf2012
3f4189fd40 Extend config support to the YAML format 2024-10-09 18:14:13 +08:00
duanhf2012
c72f2e4582 1. Add yml file support
2. Improve network module logging
3. Add type conversion functions
2024-10-09 17:38:44 +08:00
duanhf2012
f600c2a573 Improve module placement 2024-10-09 11:26:37 +08:00
duanhf2012
6a29ba2c88 Add network module 2024-10-09 10:50:54 +08:00
duanhf2012
64c14eb326 Improve the README 2024-10-09 10:08:17 +08:00
duanhf2012
75790302ec Improve tcpservice 2024-10-09 09:50:11 +08:00
duanhf2012
b943ea9a83 1. Improve the network module
2. Add kcp module
2024-09-30 14:31:24 +08:00
duanhf2012
39116c4402 Improve code style 2024-09-20 17:25:08 +08:00
duanhf2012
1cf071a444 Improve logging 2024-09-20 15:17:44 +08:00
duanhf2012
e6c09064bf Tidy up code 2024-09-18 16:19:20 +08:00
duanhf2012
84ab0cb84a Add a timer-group shutdown interface 2024-09-18 11:51:17 +08:00
duanhf2012
22fe00173b Add FrameTimer module with pause, resume, and speed-up support 2024-09-18 11:35:43 +08:00
duanhf2012
8e0ed62fca Improve the smath module 2024-09-11 15:06:33 +08:00
duanhf2012
7116b509e9 Add weighted-random helper functions 2024-09-11 14:50:31 +08:00
duanhf2012
73d384361d Improve Service multi-goroutine mode 2024-08-30 16:56:48 +08:00
boyce
ce56b19fe8 Improve service filtering 2024-06-29 14:58:41 +08:00
boyce
1367d776e6 Improve the event documentation in the README 2024-06-29 12:02:13 +08:00
boyce
987d35ff15 Improve origin service discovery 2024-06-28 12:29:58 +08:00
boyce
d225bb4bd2 Improve the default value of the logchannelcap startup parameter 2024-06-25 09:20:03 +08:00
boyce
ea37fb5081 Add log interfaces SetSkip/GetSkip 2024-06-24 15:29:54 +08:00
boyce
0a92f48d0b Improve node status 2024-06-20 09:10:18 +08:00
boyce
f5e86fee02 Add Stop interface to disconnect the mongo connection 2024-06-19 15:51:31 +08:00
boyce
166facc959 Improve the mongodb driver 2024-06-18 10:07:54 +08:00
boyce
5bb747201b Expand the README 2024-06-14 17:46:58 +08:00
boyce
1014bc54e4 Improve the network processors 2024-06-14 16:21:52 +08:00
boyce
9c26c742fe Improve goroutine pool shutdown 2024-06-14 15:42:23 +08:00
boyce
d1935b1bbc Improve logging 2024-05-27 18:09:37 +08:00
boyce
90d54bf3e2 1. Add service and Global config interfaces with struct-based parsing
2. Improve logging
2024-05-15 10:06:50 +08:00
boyce
78cc33c84e Improve config loading for service templates 2024-05-11 15:11:24 +08:00
boyce
9cf21bf418 1. Add template services
2. Improve the message queue
3. Improve related logging
2024-05-11 14:42:34 +08:00
boyce
c6d0bd9a19 Improve the gin module 2024-05-08 14:18:39 +08:00
boyce
61bf95e457 Improve RankService 2024-05-07 18:57:38 +08:00
boyce
8b2a551ee5 Improve the gin module 2024-05-06 10:51:51 +08:00
boyce
927c2ffa37 Improve the gin module 2024-05-06 10:36:04 +08:00
boyce
b23b30aac5 Improve service discovery 2024-04-30 19:05:44 +08:00
boyce
03f8ba0316 Streamline event notifications 2024-04-30 17:33:34 +08:00
boyce
277480a7f0 Merge branch 'v2' of https://github.com/duanhf2012/origin into v2 2024-04-29 09:24:04 +08:00
boyce
647a654a36 Expand and improve the documentation 2024-04-29 09:23:54 +08:00
boyce
de483a88f1 Improve server shutdown 2024-04-28 18:02:20 +08:00
boyce
bbbb511b5f Add kafka module 2024-04-28 11:21:59 +08:00
boyce
0489ee3ef4 Add gin module & improve the pb processor 2024-04-28 11:13:46 +08:00
boyce
692dacda0c Remove mongomodule; use mongodbmodule instead 2024-04-28 10:40:56 +08:00
boyce
7f86b1007d Improve support for custom loggers 2024-04-26 18:13:41 +08:00
boyce
ba5e30ae2e Expand the README 2024-04-26 15:07:42 +08:00
boyce
0b1a1d2283 Merge branch 'master' into v2 2024-04-26 14:56:28 +08:00
origin
7780947a96 Merge pull request #878 from hjdtpz/master
Export RankData.RefreshTimestamp
2024-04-26 14:48:47 +08:00
boyce
d31b1ac657 Update the README and dependency versions 2024-04-26 14:45:04 +08:00
boyce
27723bf684 Add v2 release notes 2024-04-26 14:02:59 +08:00
boyce
3f45b19bab Improve clientid handling in wsservice and tcpservice 2024-04-26 10:45:58 +08:00
boyce
94b4572c2f Update go.mod 2024-04-26 09:53:51 +08:00
boyce
1d970de0f9 Improve etcd service discovery 2024-04-25 18:27:47 +08:00
boyce
81625635ba Improve logging 2024-04-25 17:04:42 +08:00
boyce
c6ade7d3e1 Improve the origin default service discovery 2024-04-25 16:36:10 +08:00
boyce
bba5eb2929 Improve service discovery in the origin default mode 2024-04-25 11:20:06 +08:00
boyce
59efc05d24 Add rpc nats module 2024-04-23 10:44:21 +08:00
hjdtpz
4c169cf0bb Export RankData.RefreshTimestamp 2024-04-22 21:47:49 +08:00
boyce
e36693eeff Improve the etcd discovery service; support recording extra rpc info 2024-04-19 16:39:57 +08:00
boyce
a26210f17f Add etcd service discovery 2024-04-18 18:50:12 +08:00
boyce
0cf935ffa4 Change nodeid to a string 2024-04-10 17:30:52 +08:00
boyce
161a67c8a1 Commit the initial v2 version 2024-04-09 16:40:13 +08:00
boyce
96d02c8f71 create new version 2024-04-09 15:33:06 +08:00
boyce
3a56282a0b Improve the httpclient module 2024-03-19 09:00:20 +08:00
boyce
eebbef52c9 Improve the duplicate-startup check 2024-03-08 10:32:32 +08:00
boyce
2ddc54f5ac Improve node startup 2024-03-05 17:38:02 +08:00
boyce
75ef7302de Merge branch 'master' of https://github.com/duanhf2012/origin 2024-03-01 17:47:08 +08:00
boyce
ecea9d1706 Improve process startup 2024-03-01 17:35:32 +08:00
boyce
4898116698 Improve concurrent; disallow opening it twice 2024-02-27 16:18:51 +08:00
boyce
bcbee6dd11 Improve the README 2023-11-20 13:32:34 +08:00
duanhf2012
43122190a3 Add the GetNodeInfo interface 2023-11-15 08:48:41 +08:00
duanhf2012
39b862e3d9 Add IsNodeRetire function to check whether a NodeId is in the Retire state 2023-10-09 16:57:05 +08:00
duanhf2012
8c9b796fce Improve log output 2023-10-09 16:50:09 +08:00
duanhf2012
c0971a46a7 Add a retire command parameter that puts services into the retired state for a soft shutdown 2023-10-09 15:27:01 +08:00
duanhf2012
ba019ac466 Improve the service discovery README 2023-09-24 13:53:18 +08:00
duanhf2012
c803b9b9ad Improve tcpservice 2023-09-22 16:29:31 +08:00
duanhf2012
3f52ea8331 Improve filtered service discovery: support filtering against a specified master node 2023-09-22 15:43:41 +08:00
115 changed files with 9868 additions and 5265 deletions
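The diffs below show two pervasive v2 changes: package import paths move under the /v2 module path, and node IDs change from int to string. A minimal, hypothetical sketch of how calling code follows both changes (the package name, node ID value, and log message are illustrative only):

package main

import (
	// v1 used "github.com/duanhf2012/origin/log"; the v2 branch moves packages under the /v2 module path
	"github.com/duanhf2012/origin/v2/log"
)

func main() {
	nodeId := "node_1" // node IDs are strings in v2, no longer ints
	log.Info("starting", log.String("NodeId", nodeId))
}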

1050
README.md

File diff suppressed because it is too large


@@ -1,10 +1,13 @@
package cluster
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"reflect"
"strings"
"sync"
)
@@ -20,18 +23,23 @@ const (
Discard NodeStatus = 1 //discarded
)
type DiscoveryService struct {
MasterNodeId string //master node Id to filter on; empty or 0 means all master nodes
NetworkName string //for etcd, the network name whose services are filtered; empty means all networks
ServiceList []string //only the services in this list are discovered
}
type NodeInfo struct {
NodeId int
NodeName string
NodeId string
Private bool
ListenAddr string
MaxRpcParamLen uint32 //maximum RPC parameter length
CompressBytesLen int //payloads larger than this many bytes are compressed
ServiceList []string //ordered list of all services
PublicServiceList []string //publicly exposed services
DiscoveryService []string //services filtered during discovery; no filtering if unset
NeighborService []string
MaxRpcParamLen uint32 //maximum RPC parameter length
CompressBytesLen int //payloads larger than this many bytes are compressed
ServiceList []string //ordered list of all services
PublicServiceList []string //publicly exposed services
DiscoveryService []DiscoveryService //services filtered during discovery; no filtering if unset
status NodeStatus
Retire bool
}
type NodeRpcInfo struct {
@@ -42,23 +50,26 @@ type NodeRpcInfo struct {
var cluster Cluster
type Cluster struct {
localNodeInfo NodeInfo //local node config
masterDiscoveryNodeList []NodeInfo //configured discovery master nodes
globalCfg interface{} //global config
localNodeInfo NodeInfo //local node config
discoveryInfo DiscoveryInfo //service discovery config
rpcMode RpcMode
globalCfg interface{} //global config
localServiceCfg map[string]interface{} //map[serviceName]config data*
serviceDiscovery IServiceDiscovery //service discovery interface
locker sync.RWMutex //lock protecting node/service relations
mapRpc map[string]*NodeRpcInfo //nodeId
mapServiceNode map[string]map[string]struct{} //map[serviceName]map[NodeId]
mapTemplateServiceNode map[string]map[string]struct{} //map[templateServiceName]map[serviceName]nodeId
locker sync.RWMutex //lock protecting node/service relations
mapRpc map[int]NodeRpcInfo //nodeId
mapIdNode map[int]NodeInfo //map[NodeId]NodeInfo
mapServiceNode map[string]map[int]struct{} //map[serviceName]map[NodeId]
callSet rpc.CallSet
rpcNats rpc.RpcNats
rpcServer rpc.IServer
rpcServer rpc.Server
rpcEventLocker sync.RWMutex //lock protecting RPC event listeners
mapServiceListenRpcEvent map[string]struct{} //ServiceName
mapServiceListenDiscoveryEvent map[string]struct{} //ServiceName
}
func GetCluster() *Cluster {
@@ -73,80 +84,74 @@ func SetServiceDiscovery(serviceDiscovery IServiceDiscovery) {
cluster.serviceDiscovery = serviceDiscovery
}
func (cls *Cluster) Start() {
cls.rpcServer.Start(cls.localNodeInfo.ListenAddr, cls.localNodeInfo.MaxRpcParamLen,cls.localNodeInfo.CompressBytesLen)
func (cls *Cluster) Start() error {
return cls.rpcServer.Start()
}
func (cls *Cluster) Stop() {
cls.serviceDiscovery.OnNodeStop()
cls.rpcServer.Stop()
}
func (cls *Cluster) DiscardNode(nodeId int) {
func (cls *Cluster) DiscardNode(nodeId string) {
cls.locker.Lock()
nodeInfo, ok := cls.mapIdNode[nodeId]
nodeInfo, ok := cls.mapRpc[nodeId]
bDel := (ok == true) && nodeInfo.nodeInfo.status == Discard
cls.locker.Unlock()
if ok == true && nodeInfo.status == Discard {
cls.DelNode(nodeId, true)
if bDel {
cls.DelNode(nodeId)
}
}
func (cls *Cluster) DelNode(nodeId int, immediately bool) {
func (cls *Cluster) DelNode(nodeId string) {
//the MasterDiscover node and the local node are never deleted
if cls.GetMasterDiscoveryNodeInfo(nodeId) != nil || nodeId == cls.localNodeInfo.NodeId {
if cls.IsOriginMasterDiscoveryNode(nodeId) || nodeId == cls.localNodeInfo.NodeId {
return
}
cls.locker.Lock()
defer cls.locker.Unlock()
nodeInfo, ok := cls.mapIdNode[nodeId]
nodeRpc, ok := cls.mapRpc[nodeId]
if ok == false {
return
}
rpc, ok := cls.mapRpc[nodeId]
for {
//delete immediately
if immediately || ok == false {
break
}
//a node that is still connected is not disconnected proactively; only nodes without a live connection are removed
if rpc.client.IsConnected() {
nodeInfo.status = Discard
log.Info("Discard node",log.Int("nodeId",nodeInfo.NodeId),log.String("ListenAddr", nodeInfo.ListenAddr))
return
}
break
}
for _, serviceName := range nodeInfo.ServiceList {
cls.TriggerDiscoveryEvent(false, nodeId, nodeRpc.nodeInfo.ServiceList)
for _, serviceName := range nodeRpc.nodeInfo.ServiceList {
cls.delServiceNode(serviceName, nodeId)
}
delete(cls.mapIdNode, nodeId)
delete(cls.mapRpc, nodeId)
if ok == true {
rpc.client.Close(false)
nodeRpc.client.Close(false)
}
log.Info("remove node ",log.Int("NodeId", nodeInfo.NodeId),log.String("ListenAddr", nodeInfo.ListenAddr))
log.Info("remove node ", log.String("NodeId", nodeRpc.nodeInfo.NodeId), log.String("ListenAddr", nodeRpc.nodeInfo.ListenAddr))
}
func (cls *Cluster) serviceDiscoveryDelNode(nodeId int, immediately bool) {
if nodeId == 0 {
return
}
cls.DelNode(nodeId, immediately)
func (cls *Cluster) serviceDiscoveryDelNode(nodeId string) {
cls.DelNode(nodeId)
}
func (cls *Cluster) delServiceNode(serviceName string, nodeId int) {
func (cls *Cluster) delServiceNode(serviceName string, nodeId string) {
if nodeId == cls.localNodeInfo.NodeId {
return
}
//handle template services
splitServiceName := strings.Split(serviceName, ":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
mapService := cls.mapTemplateServiceNode[templateServiceName]
delete(mapService, serviceName)
if len(cls.mapTemplateServiceNode[templateServiceName]) == 0 {
delete(cls.mapTemplateServiceNode, templateServiceName)
}
}
mapNode := cls.mapServiceNode[serviceName]
delete(mapNode, nodeId)
if len(mapNode) == 0 {
@@ -164,13 +169,14 @@ func (cls *Cluster) serviceDiscoverySetNodeInfo(nodeInfo *NodeInfo) {
defer cls.locker.Unlock()
//first clear all services previously registered for this NodeId
lastNodeInfo, ok := cls.mapIdNode[nodeInfo.NodeId]
lastNodeInfo, ok := cls.mapRpc[nodeInfo.NodeId]
if ok == true {
for _, serviceName := range lastNodeInfo.ServiceList {
for _, serviceName := range lastNodeInfo.nodeInfo.ServiceList {
cls.delServiceNode(serviceName, nodeInfo.NodeId)
}
}
cluster.TriggerDiscoveryEvent(true, nodeInfo.NodeId, nodeInfo.PublicServiceList)
//then rebuild
mapDuplicate := map[string]interface{}{} //guard against duplicate data
for _, serviceName := range nodeInfo.PublicServiceList {
@@ -180,49 +186,73 @@ func (cls *Cluster) serviceDiscoverySetNodeInfo(nodeInfo *NodeInfo) {
continue
}
mapDuplicate[serviceName] = nil
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[int]struct{}, 1)
//if it is a template service, record the template relation
splitServiceName := strings.Split(serviceName, ":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
//record the template
if _, ok = cls.mapTemplateServiceNode[templateServiceName]; ok == false {
cls.mapTemplateServiceNode[templateServiceName] = map[string]struct{}{}
}
cls.mapTemplateServiceNode[templateServiceName][serviceName] = struct{}{}
}
if _, ok = cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[string]struct{}, 1)
}
cls.mapServiceNode[serviceName][nodeInfo.NodeId] = struct{}{}
}
cls.mapIdNode[nodeInfo.NodeId] = *nodeInfo
log.Info("Discovery nodeId",log.Int("NodeId", nodeInfo.NodeId),log.Any("services:", nodeInfo.PublicServiceList))
//if a connection already exists, no further setup is needed
if _, rpcInfoOK := cls.mapRpc[nodeInfo.NodeId]; rpcInfoOK == true {
if lastNodeInfo != nil {
log.Info("Discovery nodeId", log.String("NodeId", nodeInfo.NodeId), log.Any("services:", nodeInfo.PublicServiceList), log.Bool("Retire", nodeInfo.Retire))
lastNodeInfo.nodeInfo = *nodeInfo
return
}
//otherwise, establish a new connection
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = *nodeInfo
rpcInfo.client =rpc.NewRClient(nodeInfo.NodeId, nodeInfo.ListenAddr, nodeInfo.MaxRpcParamLen,cls.localNodeInfo.CompressBytesLen,cls.triggerRpcEvent)
cls.mapRpc[nodeInfo.NodeId] = rpcInfo
if cls.IsNatsMode() {
rpcInfo.client = cls.rpcNats.NewNatsClient(nodeInfo.NodeId, cls.GetLocalNodeInfo().NodeId, &cls.callSet, cls.NotifyAllService)
} else {
rpcInfo.client = rpc.NewRClient(nodeInfo.NodeId, nodeInfo.ListenAddr, nodeInfo.MaxRpcParamLen, cls.localNodeInfo.CompressBytesLen, &cls.callSet, cls.NotifyAllService)
}
cls.mapRpc[nodeInfo.NodeId] = &rpcInfo
if cls.IsNatsMode() == true || cls.discoveryInfo.discoveryType != OriginType {
log.Info("Discovery nodeId and new rpc client", log.String("NodeId", nodeInfo.NodeId), log.Any("services:", nodeInfo.PublicServiceList), log.Bool("Retire", nodeInfo.Retire))
} else {
log.Info("Discovery nodeId and new rpc client", log.String("NodeId", nodeInfo.NodeId), log.Any("services:", nodeInfo.PublicServiceList), log.Bool("Retire", nodeInfo.Retire), log.String("nodeListenAddr", nodeInfo.ListenAddr))
}
}
func (cls *Cluster) buildLocalRpc() {
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = cls.localNodeInfo
rpcInfo.client = rpc.NewLClient(rpcInfo.nodeInfo.NodeId)
cls.mapRpc[cls.localNodeInfo.NodeId] = rpcInfo
}
func (cls *Cluster) Init(localNodeId int, setupServiceFun SetupServiceFun) error {
func (cls *Cluster) Init(localNodeId string, setupServiceFun SetupServiceFun) error {
//1. initialize the configuration
err := cls.InitCfg(localNodeId)
if err != nil {
return err
}
cls.rpcServer.Init(cls)
cls.buildLocalRpc()
cls.callSet.Init()
if cls.IsNatsMode() {
cls.rpcNats.Init(cls.rpcMode.Nats.NatsUrl, cls.rpcMode.Nats.NoRandomize, cls.GetLocalNodeInfo().NodeId, cls.localNodeInfo.CompressBytesLen, cls, cluster.NotifyAllService)
cls.rpcServer = &cls.rpcNats
} else {
s := &rpc.Server{}
s.Init(cls.localNodeInfo.ListenAddr, cls.localNodeInfo.MaxRpcParamLen, cls.localNodeInfo.CompressBytesLen, cls)
cls.rpcServer = s
}
//2. set up the service discovery node
cls.SetupServiceDiscovery(localNodeId, setupServiceFun)
err = cls.setupDiscovery(localNodeId, setupServiceFun)
if err != nil {
log.Error("setupDiscovery fail", log.ErrorField("err", err))
return err
}
service.RegRpcEventFun = cls.RegRpcEvent
service.UnRegRpcEventFun = cls.UnRegRpcEvent
service.RegDiscoveryServiceEventFun = cls.RegDiscoveryEvent
service.UnRegDiscoveryServiceEventFun = cls.UnReDiscoveryEvent
err = cls.serviceDiscovery.InitDiscovery(localNodeId, cls.serviceDiscoveryDelNode, cls.serviceDiscoverySetNodeInfo)
if err != nil {
@@ -232,74 +262,6 @@ func (cls *Cluster) Init(localNodeId int, setupServiceFun SetupServiceFun) error
return nil
}
func (cls *Cluster) checkDynamicDiscovery(localNodeId int) (bool, bool) {
var localMaster bool //whether the local node is a master node
var hasMaster bool //whether a master service is configured
//iterate over all nodes
for _, nodeInfo := range cls.masterDiscoveryNodeList {
if nodeInfo.NodeId == localNodeId {
localMaster = true
}
hasMaster = true
}
//return the query result
return localMaster, hasMaster
}
func (cls *Cluster) AddDynamicDiscoveryService(serviceName string, bPublicService bool) {
addServiceList := append([]string{},serviceName)
cls.localNodeInfo.ServiceList = append(addServiceList,cls.localNodeInfo.ServiceList...)
if bPublicService {
cls.localNodeInfo.PublicServiceList = append(cls.localNodeInfo.PublicServiceList, serviceName)
}
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = map[int]struct{}{}
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
func (cls *Cluster) GetDiscoveryNodeList() []NodeInfo {
return cls.masterDiscoveryNodeList
}
func (cls *Cluster) GetMasterDiscoveryNodeInfo(nodeId int) *NodeInfo {
for i := 0; i < len(cls.masterDiscoveryNodeList); i++ {
if cls.masterDiscoveryNodeList[i].NodeId == nodeId {
return &cls.masterDiscoveryNodeList[i]
}
}
return nil
}
func (cls *Cluster) IsMasterDiscoveryNode() bool {
return cls.GetMasterDiscoveryNodeInfo(cls.GetLocalNodeInfo().NodeId) != nil
}
func (cls *Cluster) SetupServiceDiscovery(localNodeId int, setupServiceFun SetupServiceFun) {
if cls.serviceDiscovery != nil {
return
}
//1. if no DiscoveryNode is configured, use the default config-file based service discovery
localMaster, hasMaster := cls.checkDynamicDiscovery(localNodeId)
if hasMaster == false {
cls.serviceDiscovery = &ConfigDiscovery{}
return
}
setupServiceFun(&masterService, &clientService)
//2. for dynamic service discovery, install the local discovery service
cls.serviceDiscovery = getDynamicDiscovery()
cls.AddDynamicDiscoveryService(DynamicDiscoveryClientName, true)
if localMaster == true {
cls.AddDynamicDiscoveryService(DynamicDiscoveryMasterName, false)
}
}
func (cls *Cluster) FindRpcHandler(serviceName string) rpc.IRpcHandler {
pService := service.GetService(serviceName)
if pService == nil {
@@ -309,93 +271,89 @@ func (cls *Cluster) FindRpcHandler(serviceName string) rpc.IRpcHandler {
return pService.GetRpcHandler()
}
func (cls *Cluster) getRpcClient(nodeId int) *rpc.Client {
func (cls *Cluster) getRpcClient(nodeId string) (*rpc.Client, bool) {
c, ok := cls.mapRpc[nodeId]
if ok == false {
return nil
return nil, false
}
return c.client
return c.client, c.nodeInfo.Retire
}
func (cls *Cluster) GetRpcClient(nodeId int) *rpc.Client {
func (cls *Cluster) GetRpcClient(nodeId string) (*rpc.Client, bool) {
cls.locker.RLock()
defer cls.locker.RUnlock()
return cls.getRpcClient(nodeId)
}
func GetRpcClient(nodeId int, serviceMethod string, clientList []*rpc.Client) (error, int) {
if nodeId > 0 {
pClient := GetCluster().GetRpcClient(nodeId)
func GetNodeIdByTemplateService(templateServiceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
return GetCluster().GetNodeIdByTemplateService(templateServiceName, rpcClientList, filterRetire)
}
func GetRpcClient(nodeId string, serviceMethod string, filterRetire bool, clientList []*rpc.Client) (error, []*rpc.Client) {
if nodeId != rpc.NodeIdNull {
pClient, retire := GetCluster().GetRpcClient(nodeId)
if pClient == nil {
return fmt.Errorf("cannot find nodeid %d!", nodeId), 0
return fmt.Errorf("cannot find nodeid %s", nodeId), nil
}
clientList[0] = pClient
return nil, 1
//filter out retired nodes if requested
if filterRetire == true && retire == true {
return fmt.Errorf("cannot find nodeid %s", nodeId), nil
}
clientList = append(clientList, pClient)
return nil, clientList
}
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
return fmt.Errorf("servicemethod param %s is error!", serviceMethod), 0
return fmt.Errorf("servicemethod param %s is error!", serviceMethod), nil
}
serviceName := serviceMethod[:findIndex]
//1. find the corresponding rpc node id
return GetCluster().GetNodeIdByService(serviceName, clientList, true)
return GetCluster().GetNodeIdByService(serviceName, clientList, filterRetire)
}
func GetRpcServer() *rpc.Server {
return &cluster.rpcServer
func GetRpcServer() rpc.IServer {
return cluster.rpcServer
}
func (cls *Cluster) IsNodeConnected(nodeId int) bool {
pClient := cls.GetRpcClient(nodeId)
func (cls *Cluster) IsNodeConnected(nodeId string) bool {
pClient, _ := cls.GetRpcClient(nodeId)
return pClient != nil && pClient.IsConnected()
}
func (cls *Cluster) triggerRpcEvent(bConnect bool, clientId uint32, nodeId int) {
cls.locker.Lock()
nodeInfo, ok := cls.mapRpc[nodeId]
if ok == false || nodeInfo.client == nil || nodeInfo.client.GetClientId() != clientId {
cls.locker.Unlock()
return
}
cls.locker.Unlock()
func (cls *Cluster) IsNodeRetire(nodeId string) bool {
cls.locker.RLock()
defer cls.locker.RUnlock()
_, retire := cls.getRpcClient(nodeId)
return retire
}
func (cls *Cluster) NotifyAllService(event event.IEvent) {
cls.rpcEventLocker.Lock()
defer cls.rpcEventLocker.Unlock()
for serviceName, _ := range cls.mapServiceListenRpcEvent {
for serviceName := range cls.mapServiceListenRpcEvent {
ser := service.GetService(serviceName)
if ser == nil {
log.Error("cannot find service name "+serviceName)
log.Error("cannot find service name " + serviceName)
continue
}
var eventData service.RpcConnEvent
eventData.IsConnect = bConnect
eventData.NodeId = nodeId
ser.(service.IModule).NotifyEvent(&eventData)
ser.(service.IModule).NotifyEvent(event)
}
}
func (cls *Cluster) TriggerDiscoveryEvent(bDiscovery bool, nodeId int, serviceName []string) {
cls.rpcEventLocker.Lock()
defer cls.rpcEventLocker.Unlock()
for sName, _ := range cls.mapServiceListenDiscoveryEvent {
ser := service.GetService(sName)
if ser == nil {
log.Error("cannot find service",log.Any("services",serviceName))
continue
}
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
ser.(service.IModule).NotifyEvent(&eventData)
}
func (cls *Cluster) TriggerDiscoveryEvent(bDiscovery bool, nodeId string, serviceName []string) {
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
cls.NotifyAllService(&eventData)
}
func (cls *Cluster) GetLocalNodeInfo() *NodeInfo {
@@ -418,26 +376,7 @@ func (cls *Cluster) UnRegRpcEvent(serviceName string) {
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) RegDiscoveryEvent(serviceName string) {
cls.rpcEventLocker.Lock()
if cls.mapServiceListenDiscoveryEvent == nil {
cls.mapServiceListenDiscoveryEvent = map[string]struct{}{}
}
cls.mapServiceListenDiscoveryEvent[serviceName] = struct{}{}
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) UnReDiscoveryEvent(serviceName string) {
cls.rpcEventLocker.Lock()
delete(cls.mapServiceListenDiscoveryEvent, serviceName)
cls.rpcEventLocker.Unlock()
}
func HasService(nodeId int, serviceName string) bool {
func HasService(nodeId string, serviceName string) bool {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
@@ -450,7 +389,7 @@ func HasService(nodeId int, serviceName string) bool {
return false
}
func GetNodeByServiceName(serviceName string) map[int]struct{} {
func GetNodeByServiceName(serviceName string) map[string]struct{} {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
@@ -459,23 +398,93 @@ func GetNodeByServiceName(serviceName string) map[int]struct{} {
return nil
}
mapNodeId := map[int]struct{}{}
for nodeId,_ := range mapNode {
mapNodeId := map[string]struct{}{}
for nodeId := range mapNode {
mapNodeId[nodeId] = struct{}{}
}
return mapNodeId
}
// GetNodeByTemplateServiceName resolves the real service names for a template service name, returning map[serviceName]NodeId
func GetNodeByTemplateServiceName(templateServiceName string) map[string]string {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
mapServiceName := cluster.mapTemplateServiceNode[templateServiceName]
mapNodeId := make(map[string]string, 9)
for serviceName := range mapServiceName {
mapNode, ok := cluster.mapServiceNode[serviceName]
if ok == false {
return nil
}
for nodeId := range mapNode {
mapNodeId[serviceName] = nodeId
}
}
return mapNodeId
}
func (cls *Cluster) GetGlobalCfg() interface{} {
return cls.globalCfg
}
func (cls *Cluster) ParseGlobalCfg(cfg interface{}) error {
if cls.globalCfg == nil {
return errors.New("no service configuration found")
}
func (cls *Cluster) GetNodeInfo(nodeId int) (NodeInfo,bool) {
rv := reflect.ValueOf(cls.globalCfg)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
return errors.New("no service configuration found")
}
bytes, err := json.Marshal(cls.globalCfg)
if err != nil {
return err
}
return json.Unmarshal(bytes, cfg)
}
func (cls *Cluster) GetNodeInfo(nodeId string) (NodeInfo, bool) {
cls.locker.RLock()
defer cls.locker.RUnlock()
nodeInfo,ok:= cls.mapIdNode[nodeId]
return nodeInfo,ok
nodeInfo, ok := cls.mapRpc[nodeId]
if ok == false || nodeInfo == nil {
return NodeInfo{}, false
}
return nodeInfo.nodeInfo, true
}
func (cls *Cluster) CanDiscoveryService(fromMasterNodeId string, serviceName string) bool {
canDiscovery := true
splitServiceName := strings.Split(serviceName, ":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
}
for i := 0; i < len(cls.GetLocalNodeInfo().DiscoveryService); i++ {
masterNodeId := cls.GetLocalNodeInfo().DiscoveryService[i].MasterNodeId
//skip invalid config entries
if masterNodeId == rpc.NodeIdNull && len(cls.GetLocalNodeInfo().DiscoveryService[i].ServiceList) == 0 {
continue
}
canDiscovery = false
if masterNodeId == fromMasterNodeId || masterNodeId == rpc.NodeIdNull {
for _, discoveryService := range cls.GetLocalNodeInfo().DiscoveryService[i].ServiceList {
if discoveryService == serviceName {
return true
}
}
}
}
return canDiscovery
}
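ParseGlobalCfg above decodes the node's Global configuration into a caller-supplied struct by marshaling the loaded config to JSON and unmarshaling it again. A short usage sketch, assuming a hypothetical application-defined GlobalCfg struct whose fields and tags match the Global config keys:

package game // illustrative package

import "github.com/duanhf2012/origin/v2/cluster"

type GlobalCfg struct {
	Env      string `json:"Env"`      // hypothetical field; must match a key in the Global config
	OpenGate bool   `json:"OpenGate"` // hypothetical field
}

func loadGlobalCfg() (*GlobalCfg, error) {
	var cfg GlobalCfg
	// ParseGlobalCfg JSON round-trips the global config into cfg, so the struct tags select the keys to read
	if err := cluster.GetCluster().ParseGlobalCfg(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}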


@@ -1,34 +1,31 @@
package cluster
import "github.com/duanhf2012/origin/v2/rpc"
type ConfigDiscovery struct {
funDelService FunDelNode
funSetService FunSetNodeInfo
localNodeId int
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
}
func (discovery *ConfigDiscovery) InitDiscovery(localNodeId int,funDelNode FunDelNode,funSetNodeInfo FunSetNodeInfo) error{
func (discovery *ConfigDiscovery) InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error {
discovery.localNodeId = localNodeId
discovery.funDelService = funDelNode
discovery.funSetService = funSetNodeInfo
discovery.funDelNode = funDelNode
discovery.funSetNode = funSetNode
//parse the other local node configs
_,nodeInfoList,err := GetCluster().readLocalClusterConfig(0)
_, nodeInfoList, _, err := GetCluster().readLocalClusterConfig(rpc.NodeIdNull)
if err != nil {
return err
}
for _,nodeInfo := range nodeInfoList {
for _, nodeInfo := range nodeInfoList {
if nodeInfo.NodeId == localNodeId {
continue
}
discovery.funSetService(&nodeInfo)
discovery.funSetNode(&nodeInfo)
}
return nil
}
func (discovery *ConfigDiscovery) OnNodeStop(){
}

71
cluster/discovery.go Normal file

@@ -0,0 +1,71 @@
package cluster
import (
"errors"
"github.com/duanhf2012/origin/v2/service"
)
func (cls *Cluster) setupDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.discoveryInfo.getDiscoveryType() == OriginType { //origin-type service discovery
return cls.setupOriginDiscovery(localNodeId, setupServiceFun)
} else if cls.discoveryInfo.getDiscoveryType() == EtcdType { //etcd-type service discovery
return cls.setupEtcdDiscovery(localNodeId, setupServiceFun)
}
return cls.setupConfigDiscovery(localNodeId, setupServiceFun)
}
func (cls *Cluster) setupOriginDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
//1. if no DiscoveryNode is configured, use the default config-file based service discovery
localMaster, hasMaster := cls.checkOriginDiscovery(localNodeId)
if hasMaster == false {
return errors.New("no master node config")
}
cls.serviceDiscovery = getOriginDiscovery()
//2. for dynamic service discovery, install the local discovery service
if localMaster == true {
setupServiceFun(&masterService)
cls.AddDiscoveryService(OriginDiscoveryMasterName, false)
}
setupServiceFun(&clientService)
cls.AddDiscoveryService(OriginDiscoveryClientName, true)
return nil
}
func (cls *Cluster) setupEtcdDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
//setup etcd service
cls.serviceDiscovery = getEtcdDiscovery()
setupServiceFun(cls.serviceDiscovery.(service.IService))
cls.AddDiscoveryService(cls.serviceDiscovery.(service.IService).GetName(), false)
return nil
}
func (cls *Cluster) setupConfigDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error {
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
cls.serviceDiscovery = &ConfigDiscovery{}
return nil
}
func (cls *Cluster) GetOriginDiscovery() *OriginDiscovery {
return cls.discoveryInfo.Origin
}
func (cls *Cluster) GetEtcdDiscovery() *EtcdDiscovery {
return cls.discoveryInfo.Etcd
}


@@ -1,432 +0,0 @@
package cluster
import (
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"time"
"github.com/duanhf2012/origin/util/timer"
)
const DynamicDiscoveryMasterName = "DiscoveryMaster"
const DynamicDiscoveryClientName = "DiscoveryClient"
const RegServiceDiscover = DynamicDiscoveryMasterName + ".RPC_RegServiceDiscover"
const SubServiceDiscover = DynamicDiscoveryClientName + ".RPC_SubServiceDiscover"
const AddSubServiceDiscover = DynamicDiscoveryMasterName + ".RPC_AddSubServiceDiscover"
type DynamicDiscoveryMaster struct {
service.Service
mapNodeInfo map[int32]struct{}
nodeInfo []*rpc.NodeInfo
}
type DynamicDiscoveryClient struct {
service.Service
funDelService FunDelNode
funSetService FunSetNodeInfo
localNodeId int
mapDiscovery map[int32]map[int32]struct{} //map[masterNodeId]map[nodeId]struct{}
}
var masterService DynamicDiscoveryMaster
var clientService DynamicDiscoveryClient
func getDynamicDiscovery() IServiceDiscovery {
return &clientService
}
func init() {
masterService.SetName(DynamicDiscoveryMasterName)
clientService.SetName(DynamicDiscoveryClientName)
}
func (ds *DynamicDiscoveryMaster) isRegNode(nodeId int32) bool {
_, ok := ds.mapNodeInfo[nodeId]
return ok
}
func (ds *DynamicDiscoveryMaster) addNodeInfo(nodeInfo *rpc.NodeInfo) {
if len(nodeInfo.PublicServiceList) == 0 {
return
}
_, ok := ds.mapNodeInfo[nodeInfo.NodeId]
if ok == true {
return
}
ds.mapNodeInfo[nodeInfo.NodeId] = struct{}{}
ds.nodeInfo = append(ds.nodeInfo, nodeInfo)
}
func (ds *DynamicDiscoveryMaster) removeNodeInfo(nodeId int32) {
if _,ok:= ds.mapNodeInfo[nodeId];ok == false {
return
}
for i:=0;i<len(ds.nodeInfo);i++ {
if ds.nodeInfo[i].NodeId == nodeId {
ds.nodeInfo = append(ds.nodeInfo[:i],ds.nodeInfo[i+1:]...)
break
}
}
delete(ds.mapNodeInfo,nodeId)
}
func (ds *DynamicDiscoveryMaster) OnInit() error {
ds.mapNodeInfo = make(map[int32]struct{}, 20)
ds.RegRpcListener(ds)
return nil
}
func (ds *DynamicDiscoveryMaster) OnStart() {
var nodeInfo rpc.NodeInfo
localNodeInfo := cluster.GetLocalNodeInfo()
if localNodeInfo.Private == true {
return
}
nodeInfo.NodeId = int32(localNodeInfo.NodeId)
nodeInfo.NodeName = localNodeInfo.NodeName
nodeInfo.ListenAddr = localNodeInfo.ListenAddr
nodeInfo.PublicServiceList = localNodeInfo.PublicServiceList
nodeInfo.MaxRpcParamLen = localNodeInfo.MaxRpcParamLen
ds.addNodeInfo(&nodeInfo)
}
func (ds *DynamicDiscoveryMaster) OnNodeConnected(nodeId int) {
//do not notify nodes that never registered
if ds.isRegNode(int32(nodeId)) == false {
return
}
//publish the full service list to it
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.IsFull = true
notifyDiscover.NodeInfo = ds.nodeInfo
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
ds.GoNode(nodeId, SubServiceDiscover, &notifyDiscover)
}
func (ds *DynamicDiscoveryMaster) OnNodeDisconnect(nodeId int) {
if ds.isRegNode(int32(nodeId)) == false {
return
}
ds.removeNodeInfo(int32(nodeId))
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
notifyDiscover.DelNodeId = int32(nodeId)
//remove the node
cluster.DelNode(nodeId, true)
//do not broadcast for unregistered nodes, so disconnects from outside this master's network are not propagated into it
ds.CastGo(SubServiceDiscover, &notifyDiscover)
}
func (ds *DynamicDiscoveryMaster) RpcCastGo(serviceMethod string, args interface{}) {
for nodeId, _ := range ds.mapNodeInfo {
ds.GoNode(int(nodeId), serviceMethod, args)
}
}
// a node has registered with this master
func (ds *DynamicDiscoveryMaster) RPC_RegServiceDiscover(req *rpc.ServiceDiscoverReq, res *rpc.Empty) error {
if req.NodeInfo == nil {
err := errors.New("RPC_RegServiceDiscover req is error.")
log.Error(err.Error())
return err
}
//broadcast to all other nodes
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
notifyDiscover.NodeInfo = append(notifyDiscover.NodeInfo, req.NodeInfo)
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
//store locally
ds.addNodeInfo(req.NodeInfo)
//initialize the node info
var nodeInfo NodeInfo
nodeInfo.NodeId = int(req.NodeInfo.NodeId)
nodeInfo.NodeName = req.NodeInfo.NodeName
nodeInfo.Private = req.NodeInfo.Private
nodeInfo.ServiceList = req.NodeInfo.PublicServiceList
nodeInfo.PublicServiceList = req.NodeInfo.PublicServiceList
nodeInfo.ListenAddr = req.NodeInfo.ListenAddr
nodeInfo.MaxRpcParamLen = req.NodeInfo.MaxRpcParamLen
//proactively remove the existing node, ensuring it disconnects before reconnecting
cluster.serviceDiscoveryDelNode(nodeInfo.NodeId, true)
//add it to the local Cluster module, which will connect to the node
cluster.serviceDiscoverySetNodeInfo(&nodeInfo)
return nil
}
func (dc *DynamicDiscoveryClient) OnInit() error {
dc.RegRpcListener(dc)
dc.mapDiscovery = map[int32]map[int32]struct{}{}
return nil
}
func (dc *DynamicDiscoveryClient) addMasterNode(masterNodeId int32, nodeId int32) {
_, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
dc.mapDiscovery[masterNodeId] = map[int32]struct{}{}
}
dc.mapDiscovery[masterNodeId][nodeId] = struct{}{}
}
func (dc *DynamicDiscoveryClient) removeMasterNode(masterNodeId int32, nodeId int32) {
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return
}
delete(mapNodeId, nodeId)
}
func (dc *DynamicDiscoveryClient) findNodeId(nodeId int32) bool {
for _, mapNodeId := range dc.mapDiscovery {
_, ok := mapNodeId[nodeId]
if ok == true {
return true
}
}
return false
}
func (dc *DynamicDiscoveryClient) OnStart() {
//2. add and connect to the discovery master nodes
dc.addDiscoveryMaster()
}
func (dc *DynamicDiscoveryClient) addDiscoveryMaster() {
discoveryNodeList := cluster.GetDiscoveryNodeList()
for i := 0; i < len(discoveryNodeList); i++ {
if discoveryNodeList[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
dc.funSetService(&discoveryNodeList[i])
}
}
func (dc *DynamicDiscoveryClient) fullCompareDiffNode(masterNodeId int32, mapNodeInfo map[int32]*rpc.NodeInfo) []int32 {
if mapNodeInfo == nil {
return nil
}
diffNodeIdSlice := make([]int32, 0, len(mapNodeInfo))
mapNodeId := map[int32]struct{}{}
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return nil
}
//locally known nodes that are absent from the reported set go into diffNodeIdSlice
for nodeId, _ := range mapNodeId {
_, ok := mapNodeInfo[nodeId]
if ok == false {
diffNodeIdSlice = append(diffNodeIdSlice, nodeId)
}
}
return diffNodeIdSlice
}
//notification of the subscribed service discovery
func (dc *DynamicDiscoveryClient) RPC_SubServiceDiscover(req *rpc.SubscribeDiscoverNotify) error {
//collect the NeighborService entries to filter for the current master node
masterDiscoveryNodeInfo := cluster.GetMasterDiscoveryNodeInfo(int(req.MasterNodeId))
mapMasterDiscoveryService := map[string]struct{}{}
if masterDiscoveryNodeInfo != nil {
for i := 0; i < len(masterDiscoveryNodeInfo.NeighborService); i++ {
mapMasterDiscoveryService[masterDiscoveryNodeInfo.NeighborService[i]] = struct{}{}
}
}
mapNodeInfo := map[int32]*rpc.NodeInfo{}
for _, nodeInfo := range req.NodeInfo {
//skip the local node and nodes without any public services
if int(nodeInfo.NodeId) == dc.localNodeId {
continue
}
if cluster.IsMasterDiscoveryNode() == false && len(nodeInfo.PublicServiceList) == 1 &&
nodeInfo.PublicServiceList[0] == DynamicDiscoveryClientName {
continue
}
//iterate over all public services and filter them
for _, serviceName := range nodeInfo.PublicServiceList {
//only filter when a configuration exists
if len(mapMasterDiscoveryService) > 0 {
if _, ok := mapMasterDiscoveryService[serviceName]; ok == false {
continue
}
}
nInfo := mapNodeInfo[nodeInfo.NodeId]
if nInfo == nil {
nInfo = &rpc.NodeInfo{}
nInfo.NodeId = nodeInfo.NodeId
nInfo.NodeName = nodeInfo.NodeName
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
mapNodeInfo[nodeInfo.NodeId] = nInfo
}
nInfo.PublicServiceList = append(nInfo.PublicServiceList, serviceName)
}
}
//for a full sync, find the nodes that differ
var willDelNodeId []int32
//if it is not a neighbor node, apply filtering
if req.IsFull == true {
diffNode := dc.fullCompareDiffNode(req.MasterNodeId, mapNodeInfo)
if len(diffNode) > 0 {
willDelNodeId = append(willDelNodeId, diffNode...)
}
}
//delete the explicitly specified node
if req.DelNodeId > 0 && req.DelNodeId != int32(dc.localNodeId) {
willDelNodeId = append(willDelNodeId, req.DelNodeId)
}
//remove the unneeded nodes
for _, nodeId := range willDelNodeId {
nodeInfo,_ := cluster.GetNodeInfo(int(nodeId))
cluster.TriggerDiscoveryEvent(false,int(nodeId),nodeInfo.PublicServiceList)
dc.removeMasterNode(req.MasterNodeId, int32(nodeId))
if dc.findNodeId(nodeId) == false {
dc.funDelService(int(nodeId), false)
}
}
//set the new nodes
for _, nodeInfo := range mapNodeInfo {
dc.addMasterNode(req.MasterNodeId, nodeInfo.NodeId)
dc.setNodeInfo(nodeInfo)
if len(nodeInfo.PublicServiceList) == 0 {
continue
}
cluster.TriggerDiscoveryEvent(true,int(nodeInfo.NodeId),nodeInfo.PublicServiceList)
}
return nil
}
func (dc *DynamicDiscoveryClient) isDiscoverNode(nodeId int) bool {
for i := 0; i < len(cluster.masterDiscoveryNodeList); i++ {
if cluster.masterDiscoveryNodeList[i].NodeId == nodeId {
return true
}
}
return false
}
func (dc *DynamicDiscoveryClient) OnNodeConnected(nodeId int) {
dc.regServiceDiscover(nodeId)
}
func (dc *DynamicDiscoveryClient) regServiceDiscover(nodeId int){
nodeInfo := cluster.GetMasterDiscoveryNodeInfo(nodeId)
if nodeInfo == nil {
return
}
var req rpc.ServiceDiscoverReq
req.NodeInfo = &rpc.NodeInfo{}
req.NodeInfo.NodeId = int32(cluster.localNodeInfo.NodeId)
req.NodeInfo.NodeName = cluster.localNodeInfo.NodeName
req.NodeInfo.ListenAddr = cluster.localNodeInfo.ListenAddr
req.NodeInfo.MaxRpcParamLen = cluster.localNodeInfo.MaxRpcParamLen
//if the MasterDiscoveryNode config has no NeighborService, sync all services of this node
if len(nodeInfo.NeighborService) == 0 {
req.NodeInfo.PublicServiceList = cluster.localNodeInfo.PublicServiceList
} else {
req.NodeInfo.PublicServiceList = append(req.NodeInfo.PublicServiceList, DynamicDiscoveryClientName)
}
//sync this node's service info to the master service
err := dc.AsyncCallNode(nodeId, RegServiceDiscover, &req, func(res *rpc.Empty, err error) {
if err != nil {
log.Error("call "+RegServiceDiscover+" is fail :"+ err.Error())
dc.AfterFunc(time.Second*3, func(timer *timer.Timer) {
dc.regServiceDiscover(nodeId)
})
return
}
})
if err != nil {
log.Error("call "+ RegServiceDiscover+" is fail :"+ err.Error())
}
}
func (dc *DynamicDiscoveryClient) setNodeInfo(nodeInfo *rpc.NodeInfo) {
if nodeInfo == nil || nodeInfo.Private == true || int(nodeInfo.NodeId) == dc.localNodeId {
return
}
//filter the services of interest
localNodeInfo := cluster.GetLocalNodeInfo()
if len(localNodeInfo.DiscoveryService) > 0 {
var discoverServiceSlice = make([]string, 0, 24)
for _, pubService := range nodeInfo.PublicServiceList {
for _, discoverService := range localNodeInfo.DiscoveryService {
if pubService == discoverService {
discoverServiceSlice = append(discoverServiceSlice, pubService)
}
}
}
nodeInfo.PublicServiceList = discoverServiceSlice
}
if len(nodeInfo.PublicServiceList) == 0 {
return
}
var nInfo NodeInfo
nInfo.ServiceList = nodeInfo.PublicServiceList
nInfo.PublicServiceList = nodeInfo.PublicServiceList
nInfo.NodeId = int(nodeInfo.NodeId)
nInfo.NodeName = nodeInfo.NodeName
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
dc.funSetService(&nInfo)
}
func (dc *DynamicDiscoveryClient) OnNodeDisconnect(nodeId int) {
//clean up the Discard node
cluster.DiscardNode(nodeId)
}
func (dc *DynamicDiscoveryClient) InitDiscovery(localNodeId int, funDelNode FunDelNode, funSetNodeInfo FunSetNodeInfo) error {
dc.localNodeId = localNodeId
dc.funDelService = funDelNode
dc.funSetService = funSetNodeInfo
return nil
}
func (dc *DynamicDiscoveryClient) OnNodeStop() {
}

547
cluster/etcddiscovery.go Normal file

@@ -0,0 +1,547 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/timer"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
"google.golang.org/protobuf/proto"
"os"
"path"
"strings"
"sync/atomic"
"time"
)
const originDir = "/origin"
type etcdClientInfo struct {
watchKeys []string
leaseID clientv3.LeaseID
keepAliveChan <-chan *clientv3.LeaseKeepAliveResponse
}
type EtcdDiscoveryService struct {
service.Service
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
byteLocalNodeInfo string
mapClient map[*clientv3.Client]*etcdClientInfo
isClose int32
bRetire bool
mapDiscoveryNodeId map[string]map[string]struct{} //map[networkName]map[nodeId]
}
var etcdDiscovery *EtcdDiscoveryService
func getEtcdDiscovery() IServiceDiscovery {
if etcdDiscovery == nil {
etcdDiscovery = &EtcdDiscoveryService{}
}
return etcdDiscovery
}
func (ed *EtcdDiscoveryService) InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error {
ed.localNodeId = localNodeId
ed.funDelNode = funDelNode
ed.funSetNode = funSetNode
return nil
}
const (
eeGets = 0
eePut = 1
eeDelete = 2
)
type etcdDiscoveryEvent struct {
typ int
watchKey string
Kvs []*mvccpb.KeyValue
}
func (ee *etcdDiscoveryEvent) GetEventType() event.EventType {
return event.Sys_Event_EtcdDiscovery
}
func (ed *EtcdDiscoveryService) OnInit() error {
ed.mapClient = make(map[*clientv3.Client]*etcdClientInfo, 1)
ed.mapDiscoveryNodeId = make(map[string]map[string]struct{})
ed.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_EtcdDiscovery, ed.GetEventHandler(), ed.OnEtcdDiscovery)
err := ed.marshalNodeInfo()
if err != nil {
return err
}
etcdDiscoveryCfg := cluster.GetEtcdDiscovery()
if etcdDiscoveryCfg == nil {
return errors.New("etcd discovery config is nil.")
}
for i := 0; i < len(etcdDiscoveryCfg.EtcdList); i++ {
var client *clientv3.Client
var tlsConfig *tls.Config
if etcdDiscoveryCfg.EtcdList[i].Cert != "" {
// load cert
cert, cErr := tls.LoadX509KeyPair(etcdDiscoveryCfg.EtcdList[i].Cert, etcdDiscoveryCfg.EtcdList[i].CertKey)
if cErr != nil {
log.Error("load cert error", log.ErrorField("err", cErr))
return cErr
}
// load root ca
caData, cErr := os.ReadFile(etcdDiscoveryCfg.EtcdList[i].Ca)
if cErr != nil {
log.Error("load root ca error", log.ErrorField("err", cErr))
return cErr
}
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(caData)
tlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: pool,
}
}
client, err = clientv3.New(clientv3.Config{
Endpoints: etcdDiscoveryCfg.EtcdList[i].Endpoints,
DialTimeout: etcdDiscoveryCfg.DialTimeoutMillisecond,
Username: etcdDiscoveryCfg.EtcdList[i].UserName,
Password: etcdDiscoveryCfg.EtcdList[i].Password,
Logger: log.GetLogger().Logger,
TLS: tlsConfig,
})
if err != nil {
log.Error("etcd discovery init fail", log.ErrorField("err", err))
return err
}
ctx, _ := context.WithTimeout(context.Background(), time.Second*3)
_, err = client.Leases(ctx)
if err != nil {
log.Error("etcd discovery init fail", log.Any("endpoint", etcdDiscoveryCfg.EtcdList[i].Endpoints), log.ErrorField("err", err))
return err
}
ec := &etcdClientInfo{}
for _, networkName := range etcdDiscoveryCfg.EtcdList[i].NetworkName {
ec.watchKeys = append(ec.watchKeys, fmt.Sprintf("%s/%s", originDir, networkName))
}
ed.mapClient[client] = ec
}
return nil
}
func (ed *EtcdDiscoveryService) getRegisterKey(watchkey string) string {
return watchkey + "/" + ed.localNodeId
}
func (ed *EtcdDiscoveryService) registerServiceByClient(client *clientv3.Client, etcdClient *etcdClientInfo) {
// create a lease
var err error
var resp *clientv3.LeaseGrantResponse
resp, err = client.Grant(context.Background(), cluster.GetEtcdDiscovery().TTLSecond)
if err != nil {
log.Error("etcd registerService fail", log.ErrorField("err", err))
ed.tryRegisterService(client, etcdClient)
return
}
etcdClient.leaseID = resp.ID
for _, watchKey := range etcdClient.watchKeys {
// register the service node in etcd
_, err = client.Put(context.Background(), ed.getRegisterKey(watchKey), ed.byteLocalNodeInfo, clientv3.WithLease(resp.ID))
if err != nil {
log.Error("etcd Put fail", log.ErrorField("err", err))
ed.tryRegisterService(client, etcdClient)
return
}
}
etcdClient.keepAliveChan, err = client.KeepAlive(context.Background(), etcdClient.leaseID)
if err != nil {
log.Error("etcd KeepAlive fail", log.ErrorField("err", err))
ed.tryRegisterService(client, etcdClient)
return
}
go func() {
for {
select {
case _, ok := <-etcdClient.keepAliveChan:
//log.Debug("ok",log.Any("addr",client.Endpoints()))
if !ok {
log.Error("etcd keepAliveChan fail", log.Any("watchKeys", etcdClient.watchKeys))
ed.tryRegisterService(client, etcdClient)
return
}
}
}
}()
}
func (ed *EtcdDiscoveryService) tryRegisterService(client *clientv3.Client, etcdClient *etcdClientInfo) {
if ed.isStop() {
return
}
ed.AfterFunc(time.Second*3, func(t *timer.Timer) {
ed.registerServiceByClient(client, etcdClient)
})
}
func (ed *EtcdDiscoveryService) tryWatch(client *clientv3.Client, etcdClient *etcdClientInfo) {
if ed.isStop() {
return
}
ed.AfterFunc(time.Second*3, func(t *timer.Timer) {
ed.watchByClient(client, etcdClient)
})
}
func (ed *EtcdDiscoveryService) tryLaterRetire() {
ed.AfterFunc(time.Second, func(*timer.Timer) {
if ed.retire() != nil {
ed.tryLaterRetire()
}
})
}
func (ed *EtcdDiscoveryService) retire() error {
//update the record in etcd
for c, ec := range ed.mapClient {
for _, watchKey := range ec.watchKeys {
// register the service node in etcd
_, err := c.Put(context.Background(), ed.getRegisterKey(watchKey), ed.byteLocalNodeInfo, clientv3.WithLease(ec.leaseID))
if err != nil {
log.Error("etcd Put fail", log.ErrorField("err", err))
return err
}
}
}
return nil
}
func (ed *EtcdDiscoveryService) OnRetire() {
ed.bRetire = true
ed.marshalNodeInfo()
if ed.retire() != nil {
ed.tryLaterRetire()
}
}
func (ed *EtcdDiscoveryService) OnRelease() {
atomic.StoreInt32(&ed.isClose, 1)
ed.close()
}
func (ed *EtcdDiscoveryService) isStop() bool {
return atomic.LoadInt32(&ed.isClose) == 1
}
func (ed *EtcdDiscoveryService) OnStart() {
for c, ec := range ed.mapClient {
ed.tryRegisterService(c, ec)
ed.tryWatch(c, ec)
}
}
func (ed *EtcdDiscoveryService) marshalNodeInfo() error {
nInfo := cluster.GetLocalNodeInfo()
var nodeInfo rpc.NodeInfo
nodeInfo.NodeId = nInfo.NodeId
nodeInfo.ListenAddr = nInfo.ListenAddr
nodeInfo.Retire = ed.bRetire
nodeInfo.PublicServiceList = nInfo.PublicServiceList
nodeInfo.MaxRpcParamLen = nInfo.MaxRpcParamLen
byteLocalNodeInfo, err := proto.Marshal(&nodeInfo)
if err == nil {
ed.byteLocalNodeInfo = string(byteLocalNodeInfo)
}
return err
}
func (ed *EtcdDiscoveryService) setNodeInfo(networkName string, nodeInfo *rpc.NodeInfo) bool {
if nodeInfo == nil || nodeInfo.Private == true || nodeInfo.NodeId == ed.localNodeId {
return false
}
//filter the services of interest
var discoverServiceSlice = make([]string, 0, 24)
for _, pubService := range nodeInfo.PublicServiceList {
if cluster.CanDiscoveryService(networkName, pubService) == true {
discoverServiceSlice = append(discoverServiceSlice, pubService)
}
}
if len(discoverServiceSlice) == 0 {
return false
}
var nInfo NodeInfo
nInfo.ServiceList = discoverServiceSlice
nInfo.PublicServiceList = discoverServiceSlice
nInfo.NodeId = nodeInfo.NodeId
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
nInfo.Retire = nodeInfo.Retire
nInfo.Private = nodeInfo.Private
ed.funSetNode(&nInfo)
return true
}
func (ed *EtcdDiscoveryService) close() {
for c, ec := range ed.mapClient {
if _, err := c.Revoke(context.Background(), ec.leaseID); err != nil {
log.Error("etcd Revoke fail", log.ErrorField("err", err))
}
c.Watcher.Close()
err := c.Close()
if err != nil {
log.Error("etcd Close fail", log.ErrorField("err", err))
}
}
}
func (ed *EtcdDiscoveryService) getServices(client *clientv3.Client, etcdClient *etcdClientInfo, watchKey string) bool {
// fetch the existing keys by prefix
resp, err := client.Get(context.Background(), watchKey, clientv3.WithPrefix())
if err != nil {
log.Error("etcd Get fail", log.ErrorField("err", err))
ed.tryWatch(client, etcdClient)
return false
}
// iterate over the returned keys and values
ed.notifyGets(watchKey, resp.Kvs)
return true
}
func (ed *EtcdDiscoveryService) watchByClient(client *clientv3.Client, etcdClient *etcdClientInfo) {
//close all the watchers first
for _, watchKey := range etcdClient.watchKeys {
// watch the prefix for server changes
go ed.watcher(client, etcdClient, watchKey)
}
}
// watcher watches the key prefix
func (ed *EtcdDiscoveryService) watcher(client *clientv3.Client, etcdClient *etcdClientInfo, watchKey string) {
defer func() {
if r := recover(); r != nil {
log.StackError(fmt.Sprint(r))
ed.tryWatch(client, etcdClient)
}
}()
rch := client.Watch(context.Background(), watchKey, clientv3.WithPrefix())
if ed.getServices(client, etcdClient, watchKey) == false {
return
}
for wresp := range rch {
for _, ev := range wresp.Events {
switch ev.Type {
case clientv3.EventTypePut: // modified or added
ed.notifyPut(watchKey, ev.Kv)
case clientv3.EventTypeDelete: // deleted
ed.notifyDelete(watchKey, ev.Kv)
}
}
}
ed.tryWatch(client, etcdClient)
}
func (ed *EtcdDiscoveryService) setNode(netWorkName string, byteNode []byte) string {
var nodeInfo rpc.NodeInfo
err := proto.Unmarshal(byteNode, &nodeInfo)
if err != nil {
log.Error("Unmarshal fail", log.String("netWorkName", netWorkName), log.ErrorField("err", err))
return ""
}
ed.setNodeInfo(netWorkName, &nodeInfo)
return nodeInfo.NodeId
}
func (ed *EtcdDiscoveryService) delNode(fullKey string) string {
nodeId := ed.getNodeId(fullKey)
if nodeId == ed.localNodeId {
return ""
}
ed.funDelNode(nodeId)
return nodeId
}
func (ed *EtcdDiscoveryService) getNetworkNameByWatchKey(watchKey string) string {
return watchKey[strings.LastIndex(watchKey, "/")+1:]
}
func (ed *EtcdDiscoveryService) getNetworkNameByFullKey(fullKey string) string {
return fullKey[len(originDir)+1 : strings.LastIndex(fullKey, "/")]
}
func (ed *EtcdDiscoveryService) getNodeId(fullKey string) string {
return fullKey[strings.LastIndex(fullKey, "/")+1:]
}
func (ed *EtcdDiscoveryService) OnEtcdDiscovery(ev event.IEvent) {
disEvent := ev.(*etcdDiscoveryEvent)
switch disEvent.typ {
case eeGets:
ed.OnEventGets(disEvent.watchKey, disEvent.Kvs)
case eePut:
if len(disEvent.Kvs) == 1 {
ed.OnEventPut(disEvent.watchKey, disEvent.Kvs[0])
}
case eeDelete:
if len(disEvent.Kvs) == 1 {
ed.OnEventDelete(disEvent.watchKey, disEvent.Kvs[0])
}
}
}
func (ed *EtcdDiscoveryService) notifyGets(watchKey string, Kvs []*mvccpb.KeyValue) {
var ev etcdDiscoveryEvent
ev.typ = eeGets
ev.watchKey = watchKey
ev.Kvs = Kvs
ed.NotifyEvent(&ev)
}
func (ed *EtcdDiscoveryService) notifyPut(watchKey string, Kvs *mvccpb.KeyValue) {
var ev etcdDiscoveryEvent
ev.typ = eePut
ev.watchKey = watchKey
ev.Kvs = append(ev.Kvs, Kvs)
ed.NotifyEvent(&ev)
}
func (ed *EtcdDiscoveryService) notifyDelete(watchKey string, Kvs *mvccpb.KeyValue) {
var ev etcdDiscoveryEvent
ev.typ = eeDelete
ev.watchKey = watchKey
ev.Kvs = append(ev.Kvs, Kvs)
ed.NotifyEvent(&ev)
}
func (ed *EtcdDiscoveryService) OnEventGets(watchKey string, Kvs []*mvccpb.KeyValue) {
mapNode := make(map[string]struct{}, 32)
for _, kv := range Kvs {
nodeId := ed.setNode(ed.getNetworkNameByFullKey(string(kv.Key)), kv.Value)
mapNode[nodeId] = struct{}{}
ed.addNodeId(watchKey, nodeId)
}
// This block walks the previously discovered nodes and removes stale ones.
// For every node ID associated with watchKey in mapDiscoveryNodeId, the set is traversed.
// If a node ID is absent from mapNode and is not the local node ID, funDelNode is called to remove it.
mapLastNodeId := ed.mapDiscoveryNodeId[watchKey] // get the node ID set for this watchKey
for nodeId := range mapLastNodeId { // iterate over all node IDs
if _, ok := mapNode[nodeId]; ok == false && nodeId != ed.localNodeId { // the node is absent from mapNode and is not the local node
ed.funDelNode(nodeId) // remove the node
delete(ed.mapDiscoveryNodeId[watchKey], nodeId)
}
}
}
func (ed *EtcdDiscoveryService) OnEventPut(watchKey string, Kv *mvccpb.KeyValue) {
nodeId := ed.setNode(ed.getNetworkNameByFullKey(string(Kv.Key)), Kv.Value)
ed.addNodeId(watchKey, nodeId)
}
func (ed *EtcdDiscoveryService) OnEventDelete(watchKey string, Kv *mvccpb.KeyValue) {
nodeId := ed.delNode(string(Kv.Key))
delete(ed.mapDiscoveryNodeId[watchKey], nodeId)
}
func (ed *EtcdDiscoveryService) addNodeId(watchKey string, nodeId string) {
if _, ok := ed.mapDiscoveryNodeId[watchKey]; ok == false {
ed.mapDiscoveryNodeId[watchKey] = make(map[string]struct{})
}
ed.mapDiscoveryNodeId[watchKey][nodeId] = struct{}{}
}
func (ed *EtcdDiscoveryService) OnNodeDisconnect(nodeId string) {
//clean up the Discard node
cluster.DiscardNode(nodeId)
}
func (ed *EtcdDiscoveryService) RPC_ServiceRecord(etcdServiceRecord *service.EtcdServiceRecordEvent, empty *service.Empty) error {
var client *clientv3.Client
//write the record to etcd
for c, info := range ed.mapClient {
for _, watchKey := range info.watchKeys {
if ed.getNetworkNameByWatchKey(watchKey) == etcdServiceRecord.NetworkName {
client = c
break
}
}
}
if client == nil {
log.Error("etcd record fail,cannot find network name", log.String("networkName", etcdServiceRecord.NetworkName))
return errors.New("annot find network name")
}
var lg *clientv3.LeaseGrantResponse
var err error
if etcdServiceRecord.TTLSecond > 0 {
ctx, _ := context.WithTimeout(context.Background(), time.Second*3)
lg, err = client.Grant(ctx, etcdServiceRecord.TTLSecond)
if err != nil {
log.Error("etcd record fail,cannot grant lease", log.ErrorField("err", err))
return errors.New("cannot grant lease")
}
}
if lg != nil {
ctx, _ := context.WithTimeout(context.Background(), time.Second*3)
_, err = client.Put(ctx, path.Join(originDir, etcdServiceRecord.RecordKey), etcdServiceRecord.RecordInfo, clientv3.WithLease(lg.ID))
if err != nil {
log.Error("etcd record fail,cannot put record", log.ErrorField("err", err))
return errors.New("cannot put record")
}
return nil
}
_, err = client.Put(context.Background(), path.Join(originDir, etcdServiceRecord.RecordKey), etcdServiceRecord.RecordInfo)
if err != nil {
log.Error("etcd record fail,cannot put record", log.ErrorField("err", err))
return errors.New("cannot put record")
}
return nil
}
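For reference, the etcd keys used above follow a fixed layout under the /origin prefix. A small sketch of the key construction, mirroring getRegisterKey and getNodeId (the network name "game" and node ID "node_1" are placeholders):

package cluster // placement is illustrative only

import "strings"

// watch prefix : /origin/<networkName>          (watched with clientv3.WithPrefix)
// register key : /origin/<networkName>/<nodeId> (value: marshaled rpc.NodeInfo, bound to a lease)
func exampleEtcdKeys() (watchKey, registerKey, nodeId string) {
	watchKey = originDir + "/" + "game"     // built in OnInit for each configured NetworkName
	registerKey = watchKey + "/" + "node_1" // built by getRegisterKey
	nodeId = registerKey[strings.LastIndex(registerKey, "/")+1:] // recovered by getNodeId
	return
}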

71
cluster/nodettl.go Normal file

@@ -0,0 +1,71 @@
package cluster
import (
"container/list"
"time"
)
type nodeTTL struct {
nodeId string
refreshTime time.Time
}
type nodeSetTTL struct {
l *list.List
mapElement map[string]*list.Element
ttl time.Duration
}
func (ns *nodeSetTTL) init(ttl time.Duration) {
ns.ttl = ttl
ns.mapElement = make(map[string]*list.Element, 32)
ns.l = list.New()
}
func (ns *nodeSetTTL) removeNode(nodeId string) {
ele, ok := ns.mapElement[nodeId]
if ok == false {
return
}
ns.l.Remove(ele)
delete(ns.mapElement, nodeId)
}
func (ns *nodeSetTTL) addAndRefreshNode(nodeId string) {
ele, ok := ns.mapElement[nodeId]
if ok == false {
ele = ns.l.PushBack(nodeId)
ele.Value = &nodeTTL{nodeId, time.Now()}
ns.mapElement[nodeId] = ele
return
}
ele.Value.(*nodeTTL).refreshTime = time.Now()
ns.l.MoveToBack(ele)
}
func (ns *nodeSetTTL) checkTTL(cb func(nodeIdList []string)) {
var nodeIdList []string
for {
f := ns.l.Front()
if f == nil {
break
}
nt := f.Value.(*nodeTTL)
if time.Now().Sub(nt.refreshTime) > ns.ttl {
nodeIdList = append(nodeIdList, nt.nodeId)
} else {
break
}
//remove the node
ns.l.Remove(f)
delete(ns.mapElement, nt.nodeId)
}
if len(nodeIdList) > 0 {
cb(nodeIdList)
}
}

642
cluster/origindiscovery.go Normal file

@@ -0,0 +1,642 @@
package cluster
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/timer"
"google.golang.org/protobuf/proto"
"time"
)
const OriginDiscoveryMasterName = "DiscoveryMaster"
const OriginDiscoveryClientName = "DiscoveryClient"
const RegServiceDiscover = OriginDiscoveryMasterName + ".RPC_RegServiceDiscover"
const SubServiceDiscover = OriginDiscoveryClientName + ".RPC_SubServiceDiscover"
const NodeRetireRpcMethod = OriginDiscoveryMasterName + ".RPC_NodeRetire"
const RpcPingMethod = OriginDiscoveryMasterName + ".RPC_Ping"
const UnRegServiceDiscover = OriginDiscoveryMasterName + ".RPC_UnRegServiceDiscover"
type OriginDiscoveryMaster struct {
service.Service
mapNodeInfo map[string]struct{}
nodeInfo []*rpc.NodeInfo
nsTTL nodeSetTTL
}
type OriginDiscoveryClient struct {
service.Service
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
mapDiscovery map[string]map[string][]string //map[masterNodeId]map[nodeId]struct{}
bRetire bool
isRegisterOk bool
}
var masterService OriginDiscoveryMaster
var clientService OriginDiscoveryClient
func getOriginDiscovery() IServiceDiscovery {
return &clientService
}
func init() {
masterService.SetName(OriginDiscoveryMasterName)
clientService.SetName(OriginDiscoveryClientName)
}
func (ds *OriginDiscoveryMaster) isRegNode(nodeId string) bool {
_, ok := ds.mapNodeInfo[nodeId]
return ok
}
func (ds *OriginDiscoveryMaster) updateNodeInfo(nInfo *rpc.NodeInfo) {
if _, ok := ds.mapNodeInfo[nInfo.NodeId]; ok == false {
return
}
nodeInfo := proto.Clone(nInfo).(*rpc.NodeInfo)
for i := 0; i < len(ds.nodeInfo); i++ {
if ds.nodeInfo[i].NodeId == nodeInfo.NodeId {
ds.nodeInfo[i] = nodeInfo
break
}
}
}
func (ds *OriginDiscoveryMaster) addNodeInfo(nInfo *rpc.NodeInfo) {
if len(nInfo.PublicServiceList) == 0 {
return
}
_, ok := ds.mapNodeInfo[nInfo.NodeId]
if ok == true {
return
}
ds.mapNodeInfo[nInfo.NodeId] = struct{}{}
nodeInfo := proto.Clone(nInfo).(*rpc.NodeInfo)
ds.nodeInfo = append(ds.nodeInfo, nodeInfo)
}
func (ds *OriginDiscoveryMaster) removeNodeInfo(nodeId string) {
if _, ok := ds.mapNodeInfo[nodeId]; ok == false {
return
}
for i := 0; i < len(ds.nodeInfo); i++ {
if ds.nodeInfo[i].NodeId == nodeId {
ds.nodeInfo = append(ds.nodeInfo[:i], ds.nodeInfo[i+1:]...)
break
}
}
ds.nsTTL.removeNode(nodeId)
delete(ds.mapNodeInfo, nodeId)
}
func (ds *OriginDiscoveryMaster) OnInit() error {
ds.mapNodeInfo = make(map[string]struct{}, 20)
ds.RegNodeConnListener(ds)
ds.RegNatsConnListener(ds)
ds.nsTTL.init(time.Duration(cluster.GetOriginDiscovery().TTLSecond) * time.Second)
return nil
}
func (ds *OriginDiscoveryMaster) checkTTL() {
if cluster.IsNatsMode() == false {
return
}
interval := time.Duration(cluster.GetOriginDiscovery().TTLSecond) * time.Second
interval = interval / 3 / 2
if interval < time.Second {
interval = time.Second
}
ds.NewTicker(interval, func(t *timer.Ticker) {
ds.nsTTL.checkTTL(func(nodeIdList []string) {
for _, nodeId := range nodeIdList {
log.Info("TTL expiry", log.String("nodeId", nodeId))
ds.OnNodeDisconnect(nodeId)
}
})
})
}
func (ds *OriginDiscoveryMaster) OnStart() {
var nodeInfo rpc.NodeInfo
localNodeInfo := cluster.GetLocalNodeInfo()
nodeInfo.NodeId = localNodeInfo.NodeId
nodeInfo.ListenAddr = localNodeInfo.ListenAddr
nodeInfo.PublicServiceList = localNodeInfo.PublicServiceList
nodeInfo.MaxRpcParamLen = localNodeInfo.MaxRpcParamLen
nodeInfo.Private = localNodeInfo.Private
nodeInfo.Retire = localNodeInfo.Retire
ds.addNodeInfo(&nodeInfo)
ds.checkTTL()
}
func (ds *OriginDiscoveryMaster) OnNatsConnected() {
// sync the service discovery info to all nodes
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.IsFull = true
notifyDiscover.NodeInfo = ds.nodeInfo
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
}
func (ds *OriginDiscoveryMaster) OnNatsDisconnect() {
}
func (ds *OriginDiscoveryMaster) OnNodeConnected(nodeId string) {
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.IsFull = true
notifyDiscover.NodeInfo = ds.nodeInfo
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
ds.GoNode(nodeId, SubServiceDiscover, &notifyDiscover)
}
func (ds *OriginDiscoveryMaster) OnNodeDisconnect(nodeId string) {
if ds.isRegNode(nodeId) == false {
return
}
ds.removeNodeInfo(nodeId)
// proactively remove any existing node so it disconnects before reconnecting
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
notifyDiscover.DelNodeId = nodeId
// remove the node
cluster.DelNode(nodeId)
// do not broadcast unregistered nodes, so a disconnect outside this master's network does not notify this network
ds.CastGo(SubServiceDiscover, &notifyDiscover)
}
func (ds *OriginDiscoveryMaster) RpcCastGo(serviceMethod string, args interface{}) {
for nodeId := range ds.mapNodeInfo {
if nodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
ds.GoNode(nodeId, serviceMethod, args)
}
}
func (ds *OriginDiscoveryMaster) RPC_Ping(req *rpc.Ping, res *rpc.Pong) error {
if ds.isRegNode(req.NodeId) == false {
res.Ok = false
return nil
}
res.Ok = true
ds.nsTTL.addAndRefreshNode(req.NodeId)
return nil
}
func (ds *OriginDiscoveryMaster) RPC_NodeRetire(req *rpc.NodeRetireReq, _ *rpc.Empty) error {
log.Info("node is retire", log.String("nodeId", req.NodeInfo.NodeId), log.Bool("retire", req.NodeInfo.Retire))
ds.updateNodeInfo(req.NodeInfo)
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
notifyDiscover.NodeInfo = append(notifyDiscover.NodeInfo, req.NodeInfo)
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
return nil
}
// a node registers itself with this master
func (ds *OriginDiscoveryMaster) RPC_RegServiceDiscover(req *rpc.RegServiceDiscoverReq, res *rpc.SubscribeDiscoverNotify) error {
if req.NodeInfo == nil {
err := errors.New("RPC_RegServiceDiscover req is error.")
log.Error(err.Error())
return err
}
if req.NodeInfo.NodeId != cluster.GetLocalNodeInfo().NodeId {
ds.nsTTL.addAndRefreshNode(req.NodeInfo.NodeId)
}
// broadcast to all other nodes
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
notifyDiscover.NodeInfo = append(notifyDiscover.NodeInfo, req.NodeInfo)
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
// store locally
ds.addNodeInfo(req.NodeInfo)
// initialize the node info
var nodeInfo NodeInfo
nodeInfo.NodeId = req.NodeInfo.NodeId
nodeInfo.Private = req.NodeInfo.Private
nodeInfo.ServiceList = req.NodeInfo.PublicServiceList
nodeInfo.PublicServiceList = req.NodeInfo.PublicServiceList
nodeInfo.ListenAddr = req.NodeInfo.ListenAddr
nodeInfo.MaxRpcParamLen = req.NodeInfo.MaxRpcParamLen
nodeInfo.Retire = req.NodeInfo.Retire
// proactively remove any existing node so it disconnects before reconnecting
cluster.serviceDiscoveryDelNode(nodeInfo.NodeId)
// add it to the local Cluster module, which will connect to the node
cluster.serviceDiscoverySetNodeInfo(&nodeInfo)
res.IsFull = true
res.NodeInfo = ds.nodeInfo
res.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
return nil
}
func (ds *OriginDiscoveryMaster) RPC_UnRegServiceDiscover(req *rpc.UnRegServiceDiscoverReq, _ *rpc.Empty) error {
log.Debug("RPC_UnRegServiceDiscover", log.String("nodeId", req.NodeId))
ds.OnNodeDisconnect(req.NodeId)
return nil
}
func (dc *OriginDiscoveryClient) OnInit() error {
dc.RegNodeConnListener(dc)
dc.RegNatsConnListener(dc)
dc.mapDiscovery = map[string]map[string][]string{}
//dc.mapMasterNetwork = map[string]string{}
return nil
}
func (dc *OriginDiscoveryClient) addMasterNode(masterNodeId string, nodeId string, serviceList []string) {
_, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
dc.mapDiscovery[masterNodeId] = map[string][]string{}
}
dc.mapDiscovery[masterNodeId][nodeId] = serviceList
}
func (dc *OriginDiscoveryClient) getNodePublicService(masterNodeId string, nodeId string) []string {
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return nil
}
publicService := mapNodeId[nodeId]
return publicService
}
func (dc *OriginDiscoveryClient) removeMasterNode(masterNodeId string, nodeId string) {
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return
}
delete(mapNodeId, nodeId)
}
func (dc *OriginDiscoveryClient) findNodeId(nodeId string) bool {
for _, mapNodeId := range dc.mapDiscovery {
_, ok := mapNodeId[nodeId]
if ok == true {
return true
}
}
return false
}
func (dc *OriginDiscoveryClient) ping() {
interval := time.Duration(cluster.GetOriginDiscovery().TTLSecond) * time.Second
interval = interval / 3
if interval < time.Second {
interval = time.Second
}
dc.NewTicker(interval, func(t *timer.Ticker) {
if cluster.IsNatsMode() == false || dc.isRegisterOk == false {
return
}
var ping rpc.Ping
ping.NodeId = cluster.GetLocalNodeInfo().NodeId
masterNodes := GetCluster().GetOriginDiscovery().MasterNodeList
for i := 0; i < len(masterNodes); i++ {
if masterNodes[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
masterNodeId := masterNodes[i].NodeId
dc.AsyncCallNodeWithTimeout(3*time.Second, masterNodeId, RpcPingMethod, &ping, func(empty *rpc.Pong, err error) {
if err == nil && empty.Ok == false {
// the master no longer recognizes this node; re-register
dc.regServiceDiscover(masterNodeId)
}
})
}
})
}
func (dc *OriginDiscoveryClient) OnStart() {
// add and connect to the discovery master nodes
dc.addDiscoveryMaster()
dc.ping()
}
func (dc *OriginDiscoveryClient) addDiscoveryMaster() {
discoveryNodeList := cluster.GetOriginDiscovery()
for i := 0; i < len(discoveryNodeList.MasterNodeList); i++ {
if discoveryNodeList.MasterNodeList[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
dc.funSetNode(&discoveryNodeList.MasterNodeList[i])
}
}
func (dc *OriginDiscoveryClient) fullCompareDiffNode(masterNodeId string, mapNodeInfo map[string]*rpc.NodeInfo) []string {
if mapNodeInfo == nil {
return nil
}
diffNodeIdSlice := make([]string, 0, len(mapNodeInfo))
mapNodeId := map[string][]string{}
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return nil
}
// nodes recorded locally for this master but absent from the incoming full list go into diffNodeIdSlice
for nodeId := range mapNodeId {
_, ok = mapNodeInfo[nodeId]
if ok == false {
diffNodeIdSlice = append(diffNodeIdSlice, nodeId)
}
}
return diffNodeIdSlice
}
// RPC_SubServiceDiscover handles service discovery subscription notifications
func (dc *OriginDiscoveryClient) RPC_SubServiceDiscover(req *rpc.SubscribeDiscoverNotify) error {
mapNodeInfo := map[string]*rpc.NodeInfo{}
for _, nodeInfo := range req.NodeInfo {
// skip the local node and nodes that expose no useful public services
if nodeInfo.NodeId == dc.localNodeId {
continue
}
if cluster.IsOriginMasterDiscoveryNode(cluster.GetLocalNodeInfo().NodeId) == false && len(nodeInfo.PublicServiceList) == 1 &&
nodeInfo.PublicServiceList[0] == OriginDiscoveryClientName {
continue
}
// iterate over all public services and filter them
for _, serviceName := range nodeInfo.PublicServiceList {
nInfo := mapNodeInfo[nodeInfo.NodeId]
if nInfo == nil {
nInfo = &rpc.NodeInfo{}
nInfo.NodeId = nodeInfo.NodeId
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
nInfo.Retire = nodeInfo.Retire
nInfo.Private = nodeInfo.Private
mapNodeInfo[nodeInfo.NodeId] = nInfo
}
nInfo.PublicServiceList = append(nInfo.PublicServiceList, serviceName)
}
}
// for a full sync, find the nodes that differ
var willDelNodeId []string
if req.IsFull == true {
diffNode := dc.fullCompareDiffNode(req.MasterNodeId, mapNodeInfo)
if len(diffNode) > 0 {
willDelNodeId = append(willDelNodeId, diffNode...)
}
}
// handle an explicitly deleted node
if req.DelNodeId != rpc.NodeIdNull && req.DelNodeId != dc.localNodeId {
willDelNodeId = append(willDelNodeId, req.DelNodeId)
}
// remove nodes that are no longer needed
for _, nodeId := range willDelNodeId {
dc.funDelNode(nodeId)
dc.removeMasterNode(req.MasterNodeId, nodeId)
}
// set the new nodes
for _, nodeInfo := range mapNodeInfo {
dc.addMasterNode(req.MasterNodeId, nodeInfo.NodeId, nodeInfo.PublicServiceList)
bSet := dc.setNodeInfo(req.MasterNodeId, nodeInfo)
if bSet == false {
continue
}
}
return nil
}
func (dc *OriginDiscoveryClient) OnNodeConnected(nodeId string) {
dc.regServiceDiscover(nodeId)
}
func (dc *OriginDiscoveryClient) OnRelease() {
log.Debug("OriginDiscoveryClient")
//取消注册
var nodeRetireReq rpc.UnRegServiceDiscoverReq
nodeRetireReq.NodeId = cluster.GetLocalNodeInfo().NodeId
masterNodeList := cluster.GetOriginDiscovery()
for i := 0; i < len(masterNodeList.MasterNodeList); i++ {
if masterNodeList.MasterNodeList[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
err := dc.CallNodeWithTimeout(3*time.Second, masterNodeList.MasterNodeList[i].NodeId, UnRegServiceDiscover, &nodeRetireReq, &rpc.Empty{})
if err != nil {
log.Error("call "+UnRegServiceDiscover+" is fail", log.ErrorField("err", err))
}
}
}
func (dc *OriginDiscoveryClient) OnRetire() {
dc.bRetire = true
masterNodeList := cluster.GetOriginDiscovery()
for i := 0; i < len(masterNodeList.MasterNodeList); i++ {
var nodeRetireReq rpc.NodeRetireReq
nodeRetireReq.NodeInfo = &rpc.NodeInfo{}
nodeRetireReq.NodeInfo.NodeId = cluster.localNodeInfo.NodeId
nodeRetireReq.NodeInfo.ListenAddr = cluster.localNodeInfo.ListenAddr
nodeRetireReq.NodeInfo.MaxRpcParamLen = cluster.localNodeInfo.MaxRpcParamLen
nodeRetireReq.NodeInfo.PublicServiceList = cluster.localNodeInfo.PublicServiceList
nodeRetireReq.NodeInfo.Retire = dc.bRetire
nodeRetireReq.NodeInfo.Private = cluster.localNodeInfo.Private
err := dc.GoNode(masterNodeList.MasterNodeList[i].NodeId, NodeRetireRpcMethod, &nodeRetireReq)
if err != nil {
log.Error("call "+NodeRetireRpcMethod+" is fail", log.ErrorField("err", err))
}
}
}
func (dc *OriginDiscoveryClient) tryRegServiceDiscover(nodeId string) {
dc.AfterFunc(time.Second*3, func(timer *timer.Timer) {
dc.regServiceDiscover(nodeId)
})
}
func (dc *OriginDiscoveryClient) regServiceDiscover(nodeId string) {
if nodeId == cluster.GetLocalNodeInfo().NodeId {
return
}
nodeInfo := cluster.getOriginMasterDiscoveryNodeInfo(nodeId)
if nodeInfo == nil {
return
}
var req rpc.RegServiceDiscoverReq
req.NodeInfo = &rpc.NodeInfo{}
req.NodeInfo.NodeId = cluster.localNodeInfo.NodeId
req.NodeInfo.ListenAddr = cluster.localNodeInfo.ListenAddr
req.NodeInfo.MaxRpcParamLen = cluster.localNodeInfo.MaxRpcParamLen
req.NodeInfo.PublicServiceList = cluster.localNodeInfo.PublicServiceList
req.NodeInfo.Retire = dc.bRetire
req.NodeInfo.Private = cluster.localNodeInfo.Private
log.Debug("regServiceDiscover", log.String("nodeId", nodeId))
// sync this node's service info to the master service
_, err := dc.AsyncCallNodeWithTimeout(3*time.Second, nodeId, RegServiceDiscover, &req, func(res *rpc.SubscribeDiscoverNotify, err error) {
if err != nil {
log.Error("call " + RegServiceDiscover + " is fail :" + err.Error())
dc.tryRegServiceDiscover(nodeId)
return
}
dc.isRegisterOk = true
dc.RPC_SubServiceDiscover(res)
})
if err != nil {
log.Error("call " + RegServiceDiscover + " is fail :" + err.Error())
dc.tryRegServiceDiscover(nodeId)
}
}
func (dc *OriginDiscoveryClient) setNodeInfo(masterNodeId string, nodeInfo *rpc.NodeInfo) bool {
if nodeInfo == nil || nodeInfo.Private == true || nodeInfo.NodeId == dc.localNodeId {
return false
}
// filter to the services this node is interested in
var discoverServiceSlice = make([]string, 0, 24)
for _, pubService := range nodeInfo.PublicServiceList {
if cluster.CanDiscoveryService(masterNodeId, pubService) == true {
discoverServiceSlice = append(discoverServiceSlice, pubService)
}
}
if len(discoverServiceSlice) == 0 {
return false
}
var nInfo NodeInfo
nInfo.ServiceList = discoverServiceSlice
nInfo.PublicServiceList = discoverServiceSlice
nInfo.NodeId = nodeInfo.NodeId
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
nInfo.Retire = nodeInfo.Retire
nInfo.Private = nodeInfo.Private
dc.funSetNode(&nInfo)
return true
}
func (dc *OriginDiscoveryClient) OnNodeDisconnect(nodeId string) {
// clean up the node via DiscardNode
cluster.DiscardNode(nodeId)
}
func (dc *OriginDiscoveryClient) InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error {
dc.localNodeId = localNodeId
dc.funDelNode = funDelNode
dc.funSetNode = funSetNode
return nil
}
func (cls *Cluster) checkOriginDiscovery(localNodeId string) (bool, bool) {
var localMaster bool // whether this node is a master node
var hasMaster bool // whether a master is configured
// iterate over all configured master nodes
for _, nodeInfo := range cls.discoveryInfo.Origin.MasterNodeList {
if nodeInfo.NodeId == localNodeId {
localMaster = true
}
hasMaster = true
}
// return the lookup result
return localMaster, hasMaster
}
func (cls *Cluster) AddDiscoveryService(serviceName string, bPublicService bool) {
addServiceList := append([]string{}, serviceName)
cls.localNodeInfo.ServiceList = append(addServiceList, cls.localNodeInfo.ServiceList...)
if bPublicService {
cls.localNodeInfo.PublicServiceList = append(cls.localNodeInfo.PublicServiceList, serviceName)
}
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = map[string]struct{}{}
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
func (cls *Cluster) IsOriginMasterDiscoveryNode(nodeId string) bool {
return cls.getOriginMasterDiscoveryNodeInfo(nodeId) != nil
}
func (cls *Cluster) getOriginMasterDiscoveryNodeInfo(nodeId string) *NodeInfo {
if cls.discoveryInfo.Origin == nil {
return nil
}
for i := 0; i < len(cls.discoveryInfo.Origin.MasterNodeList); i++ {
if cls.discoveryInfo.Origin.MasterNodeList[i].NodeId == nodeId {
return &cls.discoveryInfo.Origin.MasterNodeList[i]
}
}
return nil
}
func (dc *OriginDiscoveryClient) OnNatsConnected() {
masterNodes := GetCluster().GetOriginDiscovery().MasterNodeList
for i := 0; i < len(masterNodes); i++ {
dc.regServiceDiscover(masterNodes[i].NodeId)
}
}
func (dc *OriginDiscoveryClient) OnNatsDisconnect() {
}
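
Taken together, in nats mode the TTL bookkeeping above means: the client pings every TTLSecond/3, the master scans every TTLSecond/3/2 (both clamped to at least one second), and a node whose last ping is older than TTLSecond is expired and broadcast via DelNodeId. A small worked example of the interval arithmetic (standalone sketch, not framework code):

package main

import (
	"fmt"
	"time"
)

func main() {
	ttl := 9 * time.Second    // Origin.TTLSecond (clamped to MinTTL=3 when the config is read)
	clientPing := ttl / 3     // OriginDiscoveryClient.ping ticker interval
	masterScan := ttl / 3 / 2 // OriginDiscoveryMaster.checkTTL ticker interval
	if clientPing < time.Second {
		clientPing = time.Second
	}
	if masterScan < time.Second {
		masterScan = time.Second
	}
	fmt.Println(clientPing, masterScan) // 3s 1.5s
}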


@@ -1,20 +1,197 @@
package cluster
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
jsoniter "github.com/json-iterator/go"
"gopkg.in/yaml.v3"
"io/fs"
"os"
"path/filepath"
"strings"
"time"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
type EtcdList struct {
NetworkName []string
Endpoints []string
UserName string
Password string
Cert string
CertKey string
Ca string
}
type EtcdDiscovery struct {
DialTimeoutMillisecond time.Duration
TTLSecond int64
EtcdList []EtcdList
}
type OriginDiscovery struct {
TTLSecond int64
MasterNodeList []NodeInfo
}
type DiscoveryType int
const (
InvalidType = 0
OriginType = 1
EtcdType = 2
)
const MinTTL = 3
type DiscoveryInfo struct {
discoveryType DiscoveryType
Etcd *EtcdDiscovery //etcd
Origin *OriginDiscovery //origin
}
type NatsConfig struct {
NatsUrl string
NoRandomize bool
}
type RpcMode struct {
Typ string `json:"Type"`
Nats NatsConfig
}
type NodeInfoList struct {
MasterDiscoveryNode []NodeInfo // nodes used for service discovery
NodeList []NodeInfo
RpcMode RpcMode
Discovery DiscoveryInfo
NodeList []NodeInfo
}
func validConfigFile(f string) bool {
return strings.HasSuffix(f, ".json") || strings.HasSuffix(f, ".yml") || strings.HasSuffix(f, ".yaml")
}
func yamlToJson(data []byte, v interface{}) ([]byte, error) {
mapKeyData := map[string]interface{}{}
err := yaml.Unmarshal(data, &mapKeyData)
if err != nil {
return nil, err
}
data, err = json.Marshal(mapKeyData)
if err != nil {
return nil, err
}
return data, nil
}
func unmarshalConfig(data []byte, v interface{}) error {
envData := []byte(os.ExpandEnv(string(data)))
if !json.Valid(envData) {
var err error
envData, err = yamlToJson(envData, v)
if err != nil {
return err
}
}
return json.Unmarshal(envData, v)
}
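
unmarshalConfig above first expands ${ENV} placeholders, accepts JSON as-is, and otherwise converts YAML to JSON through an intermediate map. A self-contained sketch of the same approach (loadConfig and the NatsUrl/NoRandomize struct below are illustrative, not framework API):

package main

import (
	"fmt"
	"os"

	jsoniter "github.com/json-iterator/go"
	"gopkg.in/yaml.v3"
)

var json = jsoniter.ConfigCompatibleWithStandardLibrary

// loadConfig mirrors the approach above: expand ${ENV} placeholders, accept JSON
// directly, and fall back to YAML converted through a map into JSON.
func loadConfig(data []byte, v interface{}) error {
	expanded := []byte(os.ExpandEnv(string(data)))
	if !json.Valid(expanded) {
		// yaml.v3 decodes nested objects as map[string]interface{}, so the
		// intermediate map can be re-marshalled as JSON without conversion.
		m := map[string]interface{}{}
		if err := yaml.Unmarshal(expanded, &m); err != nil {
			return err
		}
		var err error
		if expanded, err = json.Marshal(m); err != nil {
			return err
		}
	}
	return json.Unmarshal(expanded, v)
}

func main() {
	os.Setenv("NATS_URL", "nats://127.0.0.1:4222")
	var cfg struct {
		NatsUrl     string
		NoRandomize bool
	}
	yml := []byte("NatsUrl: ${NATS_URL}\nNoRandomize: true\n")
	if err := loadConfig(yml, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.NatsUrl, cfg.NoRandomize) // nats://127.0.0.1:4222 true
}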
func (d *DiscoveryInfo) getDiscoveryType() DiscoveryType {
return d.discoveryType
}
func (d *DiscoveryInfo) setDiscovery(discoveryInfo *DiscoveryInfo) error {
var err error
err = d.setOrigin(discoveryInfo.Origin)
if err != nil {
return err
}
err = d.setEtcd(discoveryInfo.Etcd)
if err != nil {
return err
}
return nil
}
func (d *DiscoveryInfo) setEtcd(etcd *EtcdDiscovery) error {
if etcd == nil {
return nil
}
if d.discoveryType != InvalidType {
return fmt.Errorf("repeat configuration of Discovery")
}
// Endpoints must not be duplicated
mapAddr := make(map[string]struct{})
for _, n := range etcd.EtcdList {
for _, endPoint := range n.Endpoints {
if _, ok := mapAddr[endPoint]; ok == true {
return fmt.Errorf("etcd discovery config Etcd.EtcdList.Endpoints %+v is repeat", endPoint)
}
mapAddr[endPoint] = struct{}{}
}
// NetworkName entries must not be duplicated
mapNetworkName := make(map[string]struct{})
for _, netName := range n.NetworkName {
if _, ok := mapNetworkName[netName]; ok == true {
return fmt.Errorf("etcd discovery config Etcd.EtcdList.NetworkName %+v is repeat", n.NetworkName)
}
mapNetworkName[netName] = struct{}{}
}
}
if etcd.TTLSecond < MinTTL {
etcd.TTLSecond = MinTTL
}
etcd.DialTimeoutMillisecond = etcd.DialTimeoutMillisecond * time.Millisecond
d.Etcd = etcd
d.discoveryType = EtcdType
return nil
}
func (d *DiscoveryInfo) setOrigin(originDiscovery *OriginDiscovery) error {
if originDiscovery == nil || len(originDiscovery.MasterNodeList) == 0 {
return nil
}
if d.discoveryType != InvalidType {
return fmt.Errorf("repeat configuration of Discovery")
}
mapListenAddr := make(map[string]struct{})
mapNodeId := make(map[string]struct{})
for _, n := range originDiscovery.MasterNodeList {
if _, ok := mapListenAddr[n.ListenAddr]; ok == true {
return fmt.Errorf("discovery config Origin.ListenAddr %s is repeat", n.ListenAddr)
}
mapListenAddr[n.ListenAddr] = struct{}{}
if _, ok := mapNodeId[n.NodeId]; ok == true {
return fmt.Errorf("discovery config Origin.NodeId %s is repeat", n.NodeId)
}
mapNodeId[n.NodeId] = struct{}{}
}
d.Origin = originDiscovery
if d.Origin.TTLSecond < MinTTL {
d.Origin.TTLSecond = MinTTL
}
d.discoveryType = OriginType
return nil
}
func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
@@ -23,7 +200,7 @@ func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
if err != nil {
return nil, err
}
err = json.Unmarshal(d, c)
err = unmarshalConfig(d, c)
if err != nil {
return nil, err
}
@@ -31,14 +208,14 @@ func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
return c, nil
}
func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]interface{}, map[int]map[string]interface{}, error) {
func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]interface{}, map[string]map[string]interface{}, error) {
c := map[string]interface{}{}
// read the config file
d, err := os.ReadFile(filepath)
if err != nil {
return nil, nil, nil, err
}
err = json.Unmarshal(d, &c)
err = unmarshalConfig(d, &c)
if err != nil {
return nil, nil, nil, err
}
@@ -50,7 +227,7 @@ func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]
serviceConfig = serviceCfg.(map[string]interface{})
}
mapNodeService := map[int]map[string]interface{}{}
mapNodeService := map[string]map[string]interface{}{}
nodeServiceCfg, ok := c["NodeService"]
if ok == true {
nodeServiceList := nodeServiceCfg.([]interface{})
@@ -60,43 +237,90 @@ func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]
if ok == false {
log.Fatal("NodeService list not find nodeId field")
}
mapNodeService[int(nodeId.(float64))] = serviceCfg
mapNodeService[nodeId.(string)] = serviceCfg
}
}
return GlobalCfg, serviceConfig, mapNodeService, nil
}
func (cls *Cluster) readLocalClusterConfig(nodeId int) ([]NodeInfo, []NodeInfo, error) {
var nodeInfoList []NodeInfo
var masterDiscoverNodeList []NodeInfo
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return nil, nil, fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
func (cls *Cluster) SetRpcMode(cfgRpcMode *RpcMode, rpcMode *RpcMode) error {
// ignore files that do not set RpcMode
if cfgRpcMode.Typ == "" {
return nil
}
// RpcMode must not be configured more than once
if cfgRpcMode.Typ != "" && rpcMode.Typ != "" {
return errors.New("repeat config RpcMode")
}
// check that Typ is valid
if cfgRpcMode.Typ != "Nats" && cfgRpcMode.Typ != "Default" {
return fmt.Errorf("RpcMode %s is not support", rpcMode.Typ)
}
if cfgRpcMode.Typ == "Nats" && len(cfgRpcMode.Nats.NatsUrl) == 0 {
return fmt.Errorf("nats rpc mode config NatsUrl is empty")
}
*rpcMode = *cfgRpcMode
return nil
}
func (cls *Cluster) readLocalClusterConfig(nodeId string) (DiscoveryInfo, []NodeInfo, RpcMode, error) {
var nodeInfoList []NodeInfo
var discoveryInfo DiscoveryInfo
var rpcMode RpcMode
// read every file under the directory; only files in a supported format are parsed, so the configuration may be split across files freely
for _, f := range fileInfoList {
if f.IsDir() == false {
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
localNodeInfoList, err := cls.ReadClusterConfig(filePath)
if err != nil {
return nil, nil, fmt.Errorf("read file path %s is error:%+v", filePath, err)
}
masterDiscoverNodeList = append(masterDiscoverNodeList, localNodeInfoList.MasterDiscoveryNode...)
for _, nodeInfo := range localNodeInfoList.NodeList {
if nodeInfo.NodeId == nodeId || nodeId == 0 {
nodeInfoList = append(nodeInfoList, nodeInfo)
}
err := filepath.Walk(configDir, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
if !validConfigFile(info.Name()) {
return nil
}
fileNodeInfoList, rErr := cls.ReadClusterConfig(path)
if rErr != nil {
return fmt.Errorf("read file path %s failed: %+v", path, rErr)
}
err = cls.SetRpcMode(&fileNodeInfoList.RpcMode, &rpcMode)
if err != nil {
return err
}
err = discoveryInfo.setDiscovery(&fileNodeInfoList.Discovery)
if err != nil {
return err
}
for _, nodeInfo := range fileNodeInfoList.NodeList {
if nodeInfo.NodeId == nodeId || nodeId == rpc.NodeIdNull {
nodeInfoList = append(nodeInfoList, nodeInfo)
}
}
return nil
})
if err != nil {
return discoveryInfo, nil, rpcMode, err
}
if nodeId != 0 && (len(nodeInfoList) != 1) {
return nil, nil, fmt.Errorf("%d configurations were found for the configuration with node ID %d!", len(nodeInfoList), nodeId)
if nodeId != rpc.NodeIdNull && (len(nodeInfoList) != 1) {
return discoveryInfo, nil, rpcMode, fmt.Errorf("nodeid %s configuration error in NodeList", nodeId)
}
for i, _ := range nodeInfoList {
for i := range nodeInfoList {
for j, s := range nodeInfoList[i].ServiceList {
// private nodes are not added to the public service list
if strings.HasPrefix(s, "_") == false && nodeInfoList[i].Private == false {
@@ -107,40 +331,36 @@ func (cls *Cluster) readLocalClusterConfig(nodeId int) ([]NodeInfo, []NodeInfo,
}
}
return masterDiscoverNodeList, nodeInfoList, nil
return discoveryInfo, nodeInfoList, rpcMode, nil
}
func (cls *Cluster) readLocalService(localNodeId int) error {
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
}
var globalCfg interface{}
func (cls *Cluster) readLocalService(localNodeId string) error {
var globalCfg interface{}
publicService := map[string]interface{}{}
nodeService := map[string]interface{}{}
// read every file under the directory; only files in a supported format are parsed, so the configuration may be split across files freely
for _, f := range fileInfoList {
if f.IsDir() == true {
continue
err := filepath.Walk(configDir, func(path string, info fs.FileInfo, err error) error {
if info.IsDir() {
return nil
}
if filepath.Ext(f.Name())!= ".json" {
continue
}
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
currGlobalCfg, serviceConfig, mapNodeService, err := cls.readServiceConfig(filePath)
if err != nil {
continue
return err
}
if !validConfigFile(info.Name()) {
return nil
}
currGlobalCfg, serviceConfig, mapNodeService, err := cls.readServiceConfig(path)
if err != nil {
return err
}
if currGlobalCfg != nil {
// [Global] must not be configured more than once
if globalCfg != nil {
return fmt.Errorf("[Global] does not allow repeated configuration in %s.",f.Name())
return fmt.Errorf("[Global] does not allow repeated configuration in %s", info.Name())
}
globalCfg = currGlobalCfg
}
@@ -148,17 +368,21 @@ func (cls *Cluster) readLocalService(localNodeId int) error {
// collect the public service configs
for _, s := range cls.localNodeInfo.ServiceList {
for {
splitServiceName := strings.Split(s, ":")
if len(splitServiceName) == 2 {
s = splitServiceName[0]
}
// look up the public service config
pubCfg, ok := serviceConfig[s]
if ok == true {
if _,publicOk := publicService[s];publicOk == true {
return fmt.Errorf("public service [%s] does not allow repeated configuration in %s.",s,f.Name())
if _, publicOk := publicService[s]; publicOk == true {
return fmt.Errorf("public service [%s] does not allow repeated configuration in %s", s, info.Name())
}
publicService[s] = pubCfg
}
// look up services configured for this specific node
nodeServiceCfg,ok := mapNodeService[localNodeId]
nodeServiceCfg, ok := mapNodeService[localNodeId]
if ok == false {
break
}
@@ -167,30 +391,40 @@ func (cls *Cluster) readLocalService(localNodeId int) error {
break
}
if _,nodeOK := nodeService[s];nodeOK == true {
return fmt.Errorf("NodeService NodeId[%d] Service[%s] does not allow repeated configuration in %s.",cls.localNodeInfo.NodeId,s,f.Name())
if _, nodeOK := nodeService[s]; nodeOK == true {
return fmt.Errorf("NodeService NodeId[%s] Service[%s] does not allow repeated configuration in %s", cls.localNodeInfo.NodeId, s, info.Name())
}
nodeService[s] = nodeCfg
break
}
}
return nil
})
if err != nil {
return err
}
// combine all the configs
for _, s := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(s, ":")
if len(splitServiceName) == 2 {
s = splitServiceName[0]
}
// look in NodeService first
var serviceCfg interface{}
var ok bool
serviceCfg,ok = nodeService[s]
serviceCfg, ok = nodeService[s]
if ok == true {
cls.localServiceCfg[s] =serviceCfg
cls.localServiceCfg[s] = serviceCfg
continue
}
// fall back to PublicService if not found
serviceCfg,ok = publicService[s]
serviceCfg, ok = publicService[s]
if ok == true {
cls.localServiceCfg[s] =serviceCfg
cls.localServiceCfg[s] = serviceCfg
}
}
cls.globalCfg = globalCfg
@@ -198,47 +432,59 @@ func (cls *Cluster) readLocalService(localNodeId int) error {
return nil
}
func (cls *Cluster) parseLocalCfg() {
cls.mapIdNode[cls.localNodeInfo.NodeId] = cls.localNodeInfo
func (cls *Cluster) parseLocalCfg() error{
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = cls.localNodeInfo
rpcInfo.client = rpc.NewLClient(rpcInfo.nodeInfo.NodeId, &cls.callSet)
for _, sName := range cls.localNodeInfo.ServiceList {
if _, ok := cls.mapServiceNode[sName]; ok == false {
cls.mapServiceNode[sName] = make(map[int]struct{})
}
cls.mapServiceNode[sName][cls.localNodeInfo.NodeId] = struct{}{}
}
}
func (cls *Cluster) checkDiscoveryNodeList(discoverMasterNode []NodeInfo) bool {
for i := 0; i < len(discoverMasterNode)-1; i++ {
for j := i + 1; j < len(discoverMasterNode); j++ {
if discoverMasterNode[i].NodeId == discoverMasterNode[j].NodeId ||
discoverMasterNode[i].ListenAddr == discoverMasterNode[j].ListenAddr {
return false
cls.mapRpc[cls.localNodeInfo.NodeId] = &rpcInfo
for _, serviceName := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(serviceName, ":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
// record the template service
if _, ok := cls.mapTemplateServiceNode[templateServiceName]; ok == false {
cls.mapTemplateServiceNode[templateServiceName] = map[string]struct{}{}
}
cls.mapTemplateServiceNode[templateServiceName][serviceName] = struct{}{}
}
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[string]struct{})
}
if _,ok:=cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId];ok {
return fmt.Errorf("duplicate service %s is configured in node %s", serviceName, cls.localNodeInfo.NodeId)
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
return true
return nil
}
func (cls *Cluster) InitCfg(localNodeId int) error {
func (cls *Cluster) IsNatsMode() bool {
return cls.rpcMode.Typ == "Nats"
}
func (cls *Cluster) GetNatsUrl() string {
return cls.rpcMode.Nats.NatsUrl
}
func (cls *Cluster) InitCfg(localNodeId string) error {
cls.localServiceCfg = map[string]interface{}{}
cls.mapRpc = map[int]NodeRpcInfo{}
cls.mapIdNode = map[int]NodeInfo{}
cls.mapServiceNode = map[string]map[int]struct{}{}
cls.mapRpc = map[string]*NodeRpcInfo{}
cls.mapServiceNode = map[string]map[string]struct{}{}
cls.mapTemplateServiceNode = map[string]map[string]struct{}{}
// load the NodeList config for the local node
discoveryNode, nodeInfoList, err := cls.readLocalClusterConfig(localNodeId)
discoveryInfo, nodeInfoList, rpcMode, err := cls.readLocalClusterConfig(localNodeId)
if err != nil {
return err
}
cls.localNodeInfo = nodeInfoList[0]
if cls.checkDiscoveryNodeList(discoveryNode) == false {
return fmt.Errorf("DiscoveryNode config is error!")
}
cls.masterDiscoveryNodeList = discoveryNode
cls.discoveryInfo = discoveryInfo
cls.rpcMode = rpcMode
// read the local service config
err = cls.readLocalService(localNodeId)
@@ -247,8 +493,7 @@ func (cls *Cluster) InitCfg(localNodeId int) error {
}
// add locally configured services to the global maps
cls.parseLocalCfg()
return nil
return cls.parseLocalCfg()
}
func (cls *Cluster) IsConfigService(serviceName string) bool {
@@ -263,26 +508,54 @@ func (cls *Cluster) IsConfigService(serviceName string) bool {
return ok
}
func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.Client, bAll bool) (error, int) {
func (cls *Cluster) GetNodeIdByTemplateService(templateServiceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
cls.locker.RLock()
defer cls.locker.RUnlock()
mapNodeId, ok := cls.mapServiceNode[serviceName]
count := 0
if ok == true {
for nodeId, _ := range mapNodeId {
pClient := GetCluster().getRpcClient(nodeId)
if pClient == nil || (bAll == false && pClient.IsConnected() == false) {
continue
}
rpcClientList[count] = pClient
count++
if count >= cap(rpcClientList) {
break
mapServiceName := cls.mapTemplateServiceNode[templateServiceName]
for serviceName := range mapServiceName {
mapNodeId, ok := cls.mapServiceNode[serviceName]
if ok == true {
for nodeId := range mapNodeId {
pClient, retire := GetCluster().getRpcClient(nodeId)
if pClient == nil || pClient.IsConnected() == false {
continue
}
// when filtering retired nodes, skip nodes in the retire state
if filterRetire == true && retire == true {
continue
}
rpcClientList = append(rpcClientList, pClient)
}
}
}
return nil, count
return nil, rpcClientList
}
func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
cls.locker.RLock()
defer cls.locker.RUnlock()
mapNodeId, ok := cls.mapServiceNode[serviceName]
if ok == true {
for nodeId := range mapNodeId {
pClient, retire := GetCluster().getRpcClient(nodeId)
if pClient == nil || pClient.IsConnected() == false {
continue
}
// when filtering retired nodes, skip nodes in the retire state
if filterRetire == true && retire == true {
continue
}
rpcClientList = append(rpcClientList, pClient)
}
}
return nil, rpcClientList
}
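
Note the changed calling convention: instead of filling a fixed-size array and returning a count, these lookups now append to the slice the caller passes in and return it. A hedged caller-side sketch (placed in the cluster package purely for type access; "GameService" is a hypothetical service name):

package cluster

import "github.com/duanhf2012/origin/v2/rpc"

// exampleSelectClients is illustrative only.
func exampleSelectClients() []*rpc.Client {
	rpcClientList := make([]*rpc.Client, 0, 8)
	// the final true filters out nodes that are in the retire state
	_, rpcClientList = GetCluster().GetNodeIdByService("GameService", rpcClientList, true)
	return rpcClientList // connected, non-retired nodes that run GameService
}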
func (cls *Cluster) GetServiceCfg(serviceName string) interface{} {


@@ -1,12 +1,12 @@
package cluster
type OperType int
type FunDelNode func (nodeId int,immediately bool)
type FunSetNodeInfo func(nodeInfo *NodeInfo)
type FunDelNode func (nodeId string)
type FunSetNode func(nodeInfo *NodeInfo)
type IServiceDiscovery interface {
InitDiscovery(localNodeId int,funDelNode FunDelNode,funSetNodeInfo FunSetNodeInfo) error
OnNodeStop()
InitDiscovery(localNodeId string,funDelNode FunDelNode,funSetNodeInfo FunSetNode) error
}


@@ -4,7 +4,8 @@ import (
"errors"
"runtime"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"sync/atomic"
)
const defaultMaxTaskChannelNum = 1000000
@@ -21,9 +22,12 @@ type Concurrent struct {
tasks chan task
cbChannel chan func(error)
open int32
}
/*
OpenConcurrentByNumCPU usage notes
cpuNumMul is a multiplier applied to the number of CPUs
Recommendation: (1) CPU-bound work: use 1  (2) I/O-bound work: use 2 or higher
*/
@@ -33,6 +37,10 @@ func (c *Concurrent) OpenConcurrentByNumCPU(cpuNumMul float32) {
}
func (c *Concurrent) OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32, maxTaskChannelNum int) {
if atomic.AddInt32(&c.open, 1) > 1 {
panic("repeated calls to OpenConcurrent are not allowed!")
}
c.tasks = make(chan task, maxTaskChannelNum)
c.cbChannel = make(chan func(error), maxTaskChannelNum)
@@ -50,7 +58,7 @@ func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func() bool, cb func(err e
}
if fn == nil && cb == nil {
log.Stack("fn and cb is nil")
log.StackError("fn and cb is nil")
return
}
@@ -60,7 +68,7 @@ func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func() bool, cb func(err e
}
if queueId != 0 {
queueId = queueId % maxTaskQueueSessionId+1
queueId = queueId%maxTaskQueueSessionId + 1
}
select {

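A minimal usage sketch of the concurrent pool described in the comment above (the package name and the callback-delivery detail are assumptions, not framework documentation):

package concurrent // package name assumed for this sketch

// exampleUsage is illustrative only; it is not part of the framework.
func exampleUsage(c *Concurrent) {
	// The usage notes above recommend 1x CPUs for CPU-bound work and 2x (or more) for I/O-bound work.
	c.OpenConcurrentByNumCPU(2)

	c.AsyncDoByQueue(1001, func() bool {
		// blocking work runs on a pool goroutine; tasks sharing a non-zero
		// queueId appear to be funneled through the same queue session (see dispatch.go)
		return true // true: deliver the callback below
	}, func(err error) {
		// callback delivered via cbChannel after the task finishes
	})
}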

@@ -6,13 +6,15 @@ import (
"time"
"fmt"
"runtime"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/util/queue"
"context"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/queue"
)
var idleTimeout = int64(2 * time.Second)
const maxTaskQueueSessionId = 10000
type dispatch struct {
@@ -30,6 +32,9 @@ type dispatch struct {
waitWorker sync.WaitGroup
waitDispatch sync.WaitGroup
cancelContext context.Context
cancel context.CancelFunc
}
func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan task, cbChannel chan func(error)) {
@@ -40,7 +45,7 @@ func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan
d.workerQueue = make(chan task)
d.cbChannel = cbChannel
d.queueIdChannel = make(chan int64, cap(tasks))
d.cancelContext, d.cancel = context.WithCancel(context.Background())
d.waitDispatch.Add(1)
go d.run()
}
@@ -52,7 +57,7 @@ func (d *dispatch) run() {
for {
select {
case queueId := <-d.queueIdChannel:
d.processqueueEvent(queueId)
d.processQueueEvent(queueId)
default:
select {
case t, ok := <-d.tasks:
@@ -61,13 +66,15 @@ func (d *dispatch) run() {
}
d.processTask(&t)
case queueId := <-d.queueIdChannel:
d.processqueueEvent(queueId)
d.processQueueEvent(queueId)
case <-timeout.C:
d.processTimer()
if atomic.LoadInt32(&d.minConcurrentNum) == -1 && len(d.tasks) == 0 {
atomic.StoreInt64(&idleTimeout,int64(time.Millisecond * 10))
}
case <-d.cancelContext.Done():
atomic.StoreInt64(&idleTimeout, int64(time.Millisecond*5))
timeout.Reset(time.Duration(atomic.LoadInt64(&idleTimeout)))
for i := int32(0); i < d.workerNum; i++ {
d.processIdle()
}
}
}
@@ -87,7 +94,7 @@ func (d *dispatch) processTimer() {
d.idle = true
}
func (d *dispatch) processqueueEvent(queueId int64) {
func (d *dispatch) processQueueEvent(queueId int64) {
d.idle = false
queueSession := d.mapTaskQueueSession[queueId]
@@ -155,17 +162,18 @@ func (d *dispatch) pushQueueTaskFinishEvent(queueId int64) {
d.queueIdChannel <- queueId
}
func (c *dispatch) pushAsyncDoCallbackEvent(cb func(err error)) {
func (d *dispatch) pushAsyncDoCallbackEvent(cb func(err error)) {
if cb == nil {
// no callback requested
return
}
c.cbChannel <- cb
d.cbChannel <- cb
}
func (d *dispatch) close() {
atomic.StoreInt32(&d.minConcurrentNum, -1)
d.cancel()
breakFor:
for {
@@ -184,10 +192,7 @@ breakFor:
func (d *dispatch) DoCallback(cb func(err error)) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
}
}()


@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
"runtime"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
)
type task struct {
@@ -51,24 +51,22 @@ func (w *worker) run(waitGroup *sync.WaitGroup, t task) {
func (w *worker) exec(t *task) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
cb := t.cb
t.cb = func(err error) {
cb(errors.New(errString))
}
log.Dump(string(buf[:l]),log.String("error",errString))
w.endCallFun(true,t)
log.StackError(errString)
w.endCallFun(true, t)
}
}()
w.endCallFun(t.fn(),t)
w.endCallFun(t.fn(), t)
}
func (w *worker) endCallFun(isDocallBack bool,t *task) {
if isDocallBack {
func (w *worker) endCallFun(isDoCallBack bool, t *task) {
if isDoCallBack {
w.pushAsyncDoCallbackEvent(t.cb)
}


@@ -8,47 +8,47 @@ import (
type valueType int
type CommandFunctionCB func(args interface{}) error
var commandList []*command
var programName string
const(
boolType valueType = 0
const (
boolType valueType = 0
stringType valueType = 1
intType valueType = 2
intType valueType = 2
)
type command struct{
valType valueType
name string
bValue bool
type command struct {
valType valueType
name string
bValue bool
strValue string
intValue int
usage string
fn CommandFunctionCB
usage string
fn CommandFunctionCB
}
func (cmd *command) execute() error{
func (cmd *command) execute() error {
if cmd.valType == boolType {
return cmd.fn(cmd.bValue)
}else if cmd.valType == stringType {
} else if cmd.valType == stringType {
return cmd.fn(cmd.strValue)
}else if cmd.valType == intType {
} else if cmd.valType == intType {
return cmd.fn(cmd.intValue)
}else{
return fmt.Errorf("Unknow command type.")
}
return nil
return fmt.Errorf("unknow command type")
}
func Run(args []string) error {
flag.Parse()
programName = args[0]
if flag.NFlag() <= 0 {
return fmt.Errorf("Command input parameter error,try `%s -help` for help",args[0])
return fmt.Errorf("Command input parameter error,try `%s -help` for help", args[0])
}
var startCmd *command
for _,val := range commandList {
for _, val := range commandList {
if val.name == "start" {
startCmd = val
continue
@@ -63,50 +63,50 @@ func Run(args []string) error {
return startCmd.execute()
}
return fmt.Errorf("Command input parameter error,try `%s -help` for help",args[0])
return fmt.Errorf("Command input parameter error,try `%s -help` for help", args[0])
}
func RegisterCommandBool(cmdName string, defaultValue bool, usage string,fn CommandFunctionCB){
func RegisterCommandBool(cmdName string, defaultValue bool, usage string, fn CommandFunctionCB) {
var cmd command
cmd.valType = boolType
cmd.name = cmdName
cmd.fn = fn
cmd.usage = usage
flag.BoolVar(&cmd.bValue, cmdName, defaultValue, usage)
commandList = append(commandList,&cmd)
commandList = append(commandList, &cmd)
}
func RegisterCommandInt(cmdName string, defaultValue int, usage string,fn CommandFunctionCB){
func RegisterCommandInt(cmdName string, defaultValue int, usage string, fn CommandFunctionCB) {
var cmd command
cmd.valType = intType
cmd.name = cmdName
cmd.fn = fn
cmd.usage = usage
flag.IntVar(&cmd.intValue, cmdName, defaultValue, usage)
commandList = append(commandList,&cmd)
commandList = append(commandList, &cmd)
}
func RegisterCommandString(cmdName string, defaultValue string, usage string,fn CommandFunctionCB){
func RegisterCommandString(cmdName string, defaultValue string, usage string, fn CommandFunctionCB) {
var cmd command
cmd.valType = stringType
cmd.name = cmdName
cmd.fn = fn
cmd.usage = usage
flag.StringVar(&cmd.strValue, cmdName, defaultValue, usage)
commandList = append(commandList,&cmd)
commandList = append(commandList, &cmd)
}
func PrintDefaults(){
func PrintDefaults() {
fmt.Fprintf(os.Stderr, "Options:\n")
for _,val := range commandList {
fmt.Fprintf(os.Stderr, " -%-10s%10s\n",val.name,val.usage)
for _, val := range commandList {
fmt.Fprintf(os.Stderr, " -%-10s%10s\n", val.name, val.usage)
}
}
func GetParamStringVal(paramName string) string{
for _,cmd := range commandList {
if cmd.name == paramName{
func GetParamStringVal(paramName string) string {
for _, cmd := range commandList {
if cmd.name == paramName {
return cmd.strValue
}
}
@@ -114,11 +114,11 @@ func GetParamStringVal(paramName string) string{
}
func GetParamBoolVal(paramName string) bool {
for _,cmd := range commandList {
if cmd.name == paramName{
for _, cmd := range commandList {
if cmd.name == paramName {
return cmd.bValue
}
}
return false
}
}
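
A minimal usage sketch of the console command registration shown above (the import path is assumed from the module path in go.mod below; the "start" flag and its usage string are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/duanhf2012/origin/v2/console" // import path assumed for this sketch
)

func main() {
	// Each registered command becomes a flag; the callback receives the parsed value.
	console.RegisterCommandString("start", "", "-start <NodeId>", func(args interface{}) error {
		fmt.Println("starting node", args.(string))
		return nil
	})
	if err := console.Run(os.Args); err != nil {
		fmt.Println(err)
		console.PrintDefaults()
	}
}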


@@ -2,12 +2,11 @@ package event
import (
"fmt"
"github.com/duanhf2012/origin/log"
"runtime"
"github.com/duanhf2012/origin/v2/log"
"sync"
)
// event receiver
// EventCallBack is the event receiver callback
type EventCallBack func(event IEvent)
type IEvent interface {
@@ -17,10 +16,14 @@ type IEvent interface {
type Event struct {
Type EventType
Data interface{}
ref bool
IntExt [2]int64
StringExt [2]string
AnyExt [2]any
ref bool
}
var emptyEvent Event
func (e *Event) Reset() {
*e = emptyEvent
}
@@ -37,18 +40,18 @@ func (e *Event) UnRef() {
e.ref = false
}
func (e *Event) GetEventType() EventType{
func (e *Event) GetEventType() EventType {
return e.Type
}
type IEventHandler interface {
Init(processor IEventProcessor)
GetEventProcessor() IEventProcessor // get the event processor
GetEventProcessor() IEventProcessor // get the event processor
NotifyEvent(IEvent)
Destroy()
// events that have been registered
addRegInfo(eventType EventType,eventProcessor IEventProcessor)
removeRegInfo(eventType EventType,eventProcessor IEventProcessor)
addRegInfo(eventType EventType, eventProcessor IEventProcessor)
removeRegInfo(eventType EventType, eventProcessor IEventProcessor)
}
type IEventChannel interface {
@@ -60,11 +63,11 @@ type IEventProcessor interface {
Init(eventChannel IEventChannel)
EventHandler(ev IEvent)
RegEventReceiverFunc(eventType EventType, receiver IEventHandler,callback EventCallBack)
RegEventReceiverFunc(eventType EventType, receiver IEventHandler, callback EventCallBack)
UnRegEventReceiverFun(eventType EventType, receiver IEventHandler)
castEvent(event IEvent) // broadcast the event
addBindEvent(eventType EventType, receiver IEventHandler,callback EventCallBack)
addBindEvent(eventType EventType, receiver IEventHandler, callback EventCallBack)
addListen(eventType EventType, receiver IEventHandler)
removeBindEvent(eventType EventType, receiver IEventHandler)
removeListen(eventType EventType, receiver IEventHandler)
@@ -75,116 +78,115 @@ type EventHandler struct {
eventProcessor IEventProcessor
// events already registered
locker sync.RWMutex
mapRegEvent map[EventType]map[IEventProcessor]interface{} // event types this handler listens for on other event processors
locker sync.RWMutex
mapRegEvent map[EventType]map[IEventProcessor]interface{} // event types this handler listens for on other event processors
}
type EventProcessor struct {
IEventChannel
locker sync.RWMutex
mapListenerEvent map[EventType]map[IEventProcessor]int //监听者信息
mapBindHandlerEvent map[EventType]map[IEventHandler]EventCallBack//收到事件处理
locker sync.RWMutex
mapListenerEvent map[EventType]map[IEventProcessor]int //监听者信息
mapBindHandlerEvent map[EventType]map[IEventHandler]EventCallBack //收到事件处理
}
func NewEventHandler() IEventHandler{
func NewEventHandler() IEventHandler {
eh := EventHandler{}
eh.mapRegEvent = map[EventType]map[IEventProcessor]interface{}{}
return &eh
}
func NewEventProcessor() IEventProcessor{
func NewEventProcessor() IEventProcessor {
ep := EventProcessor{}
ep.mapListenerEvent = map[EventType]map[IEventProcessor]int{}
ep.mapListenerEvent = map[EventType]map[IEventProcessor]int{}
ep.mapBindHandlerEvent = map[EventType]map[IEventHandler]EventCallBack{}
return &ep
}
func (handler *EventHandler) addRegInfo(eventType EventType,eventProcessor IEventProcessor){
func (handler *EventHandler) addRegInfo(eventType EventType, eventProcessor IEventProcessor) {
handler.locker.Lock()
defer handler.locker.Unlock()
if handler.mapRegEvent == nil {
handler.mapRegEvent = map[EventType]map[IEventProcessor]interface{}{}
}
if _,ok := handler.mapRegEvent[eventType] ;ok == false{
if _, ok := handler.mapRegEvent[eventType]; ok == false {
handler.mapRegEvent[eventType] = map[IEventProcessor]interface{}{}
}
handler.mapRegEvent[eventType][eventProcessor] = nil
}
func (handler *EventHandler) removeRegInfo(eventType EventType,eventProcessor IEventProcessor){
if _,ok := handler.mapRegEvent[eventType];ok == true {
delete(handler.mapRegEvent[eventType],eventProcessor)
func (handler *EventHandler) removeRegInfo(eventType EventType, eventProcessor IEventProcessor) {
if _, ok := handler.mapRegEvent[eventType]; ok == true {
delete(handler.mapRegEvent[eventType], eventProcessor)
}
}
func (handler *EventHandler) GetEventProcessor() IEventProcessor{
func (handler *EventHandler) GetEventProcessor() IEventProcessor {
return handler.eventProcessor
}
func (handler *EventHandler) NotifyEvent(ev IEvent){
func (handler *EventHandler) NotifyEvent(ev IEvent) {
handler.GetEventProcessor().castEvent(ev)
}
func (handler *EventHandler) Init(processor IEventProcessor){
func (handler *EventHandler) Init(processor IEventProcessor) {
handler.eventProcessor = processor
handler.mapRegEvent =map[EventType]map[IEventProcessor]interface{}{}
handler.mapRegEvent = map[EventType]map[IEventProcessor]interface{}{}
}
func (processor *EventProcessor) Init(eventChannel IEventChannel){
func (processor *EventProcessor) Init(eventChannel IEventChannel) {
processor.IEventChannel = eventChannel
}
func (processor *EventProcessor) addBindEvent(eventType EventType, receiver IEventHandler,callback EventCallBack){
func (processor *EventProcessor) addBindEvent(eventType EventType, receiver IEventHandler, callback EventCallBack) {
processor.locker.Lock()
defer processor.locker.Unlock()
if _,ok := processor.mapBindHandlerEvent[eventType]; ok == false {
if _, ok := processor.mapBindHandlerEvent[eventType]; ok == false {
processor.mapBindHandlerEvent[eventType] = map[IEventHandler]EventCallBack{}
}
processor.mapBindHandlerEvent[eventType][receiver] = callback
}
func (processor *EventProcessor) addListen(eventType EventType, receiver IEventHandler){
func (processor *EventProcessor) addListen(eventType EventType, receiver IEventHandler) {
processor.locker.Lock()
defer processor.locker.Unlock()
if _,ok := processor.mapListenerEvent[eventType];ok == false{
if _, ok := processor.mapListenerEvent[eventType]; ok == false {
processor.mapListenerEvent[eventType] = map[IEventProcessor]int{}
}
processor.mapListenerEvent[eventType][receiver.GetEventProcessor()] += 1
}
func (processor *EventProcessor) removeBindEvent(eventType EventType, receiver IEventHandler){
func (processor *EventProcessor) removeBindEvent(eventType EventType, receiver IEventHandler) {
processor.locker.Lock()
defer processor.locker.Unlock()
if _,ok := processor.mapBindHandlerEvent[eventType];ok == true{
if _, ok := processor.mapBindHandlerEvent[eventType]; ok == true {
delete(processor.mapBindHandlerEvent[eventType], receiver)
}
}
func (processor *EventProcessor) removeListen(eventType EventType, receiver IEventHandler){
func (processor *EventProcessor) removeListen(eventType EventType, receiver IEventHandler) {
processor.locker.Lock()
defer processor.locker.Unlock()
if _,ok := processor.mapListenerEvent[eventType];ok == true{
processor.mapListenerEvent[eventType][receiver.GetEventProcessor()]-=1
if _, ok := processor.mapListenerEvent[eventType]; ok == true {
processor.mapListenerEvent[eventType][receiver.GetEventProcessor()] -= 1
if processor.mapListenerEvent[eventType][receiver.GetEventProcessor()] <= 0 {
delete(processor.mapListenerEvent[eventType], receiver.GetEventProcessor())
}
}
}
func (processor *EventProcessor) RegEventReceiverFunc(eventType EventType, receiver IEventHandler,callback EventCallBack){
func (processor *EventProcessor) RegEventReceiverFunc(eventType EventType, receiver IEventHandler, callback EventCallBack) {
// record the events the receiver has registered
receiver.addRegInfo(eventType, processor)
// record the callback registered with the receiver's own IEventProcessor
receiver.GetEventProcessor().addBindEvent(eventType, receiver,callback)
receiver.GetEventProcessor().addBindEvent(eventType, receiver, callback)
// add the registration to the listeners
processor.addListen(eventType, receiver)
}
@@ -195,10 +197,10 @@ func (processor *EventProcessor) UnRegEventReceiverFun(eventType EventType, rece
receiver.removeRegInfo(eventType, processor)
}
func (handler *EventHandler) Destroy(){
func (handler *EventHandler) Destroy() {
handler.locker.Lock()
defer handler.locker.Unlock()
for eventTyp,mapEventProcess := range handler.mapRegEvent {
for eventTyp, mapEventProcess := range handler.mapRegEvent {
if mapEventProcess == nil {
continue
}
@@ -212,31 +214,27 @@ func (handler *EventHandler) Destroy(){
func (processor *EventProcessor) EventHandler(ev IEvent) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
}
}()
mapCallBack,ok := processor.mapBindHandlerEvent[ev.GetEventType()]
mapCallBack, ok := processor.mapBindHandlerEvent[ev.GetEventType()]
if ok == false {
return
}
for _,callback := range mapCallBack {
for _, callback := range mapCallBack {
callback(ev)
}
}
func (processor *EventProcessor) castEvent(event IEvent){
func (processor *EventProcessor) castEvent(event IEvent) {
if processor.mapListenerEvent == nil {
log.Error("mapListenerEvent not init!")
return
}
eventProcessor,ok := processor.mapListenerEvent[event.GetEventType()]
if ok == false || processor == nil{
log.Debug("event is not listen",log.Int("event type",int(event.GetEventType())))
eventProcessor, ok := processor.mapListenerEvent[event.GetEventType()]
if ok == false || processor == nil {
return
}
@@ -244,4 +242,3 @@ func (processor *EventProcessor) castEvent(event IEvent){
proc.PushEvent(event)
}
}


@@ -1,6 +1,6 @@
package event
import "github.com/duanhf2012/origin/util/sync"
import "github.com/duanhf2012/origin/v2/util/sync"
// eventPool is a memory pool that caches Event objects
const defaultMaxEventChannelNum = 2000000


@@ -2,21 +2,22 @@ package event
type EventType int
// values above Sys_Event_User_Define are reserved for user-defined events
// values above Sys_Event_User_Define are reserved for user-defined events
const (
ServiceRpcRequestEvent EventType = -1
ServiceRpcResponseEvent EventType = -2
ServiceRpcRequestEvent EventType = -1
ServiceRpcResponseEvent EventType = -2
Sys_Event_Tcp EventType = -3
Sys_Event_Http_Event EventType = -4
Sys_Event_Tcp EventType = -3
Sys_Event_Http_Event EventType = -4
Sys_Event_WebSocket EventType = -5
Sys_Event_Node_Event EventType = -6
Sys_Event_DiscoverService EventType = -7
Sys_Event_DiscardGoroutine EventType = -8
Sys_Event_QueueTaskFinish EventType = -9
Sys_Event_Kcp EventType = -6
Sys_Event_Node_Conn_Event EventType = -7
Sys_Event_Nats_Conn_Event EventType = -8
Sys_Event_DiscoverService EventType = -9
Sys_Event_Retire EventType = -10
Sys_Event_EtcdDiscovery EventType = -11
Sys_Event_Gin_Event EventType = -12
Sys_Event_FrameTick EventType = -13
Sys_Event_User_Define EventType = 1
)

go.mod

@@ -1,31 +1,95 @@
module github.com/duanhf2012/origin
module github.com/duanhf2012/origin/v2
go 1.21
go 1.22
toolchain go1.22.7
require (
github.com/IBM/sarama v1.43.3
github.com/gin-gonic/gin v1.10.0
github.com/go-sql-driver/mysql v1.6.0
github.com/gomodule/redigo v1.8.8
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.0
github.com/json-iterator/go v1.1.12
github.com/pierrec/lz4/v4 v4.1.18
github.com/nats-io/nats.go v1.34.1
github.com/pierrec/lz4/v4 v4.1.21
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/xtaci/kcp-go/v5 v5.6.18
go.etcd.io/etcd/api/v3 v3.5.13
go.etcd.io/etcd/client/v3 v3.5.13
go.mongodb.org/mongo-driver v1.9.1
google.golang.org/protobuf v1.31.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
go.uber.org/zap v1.27.0
google.golang.org/protobuf v1.34.1
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/eapache/go-resiliency v1.7.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/klauspost/reedsolomon v1.12.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/templexxx/cpu v0.1.1 // indirect
github.com/templexxx/xorsimd v0.4.3 // indirect
github.com/tjfoc/gmsm v1.4.1 // indirect
github.com/tklauser/go-sysconf v0.3.13 // indirect
github.com/tklauser/numcpus v0.7.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 // indirect
golang.org/x/text v0.4.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
go.uber.org/multierr v1.10.0 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.59.0 // indirect
)

go.sum

@@ -1,110 +1,344 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA=
github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA=
github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws=
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E=
github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/reedsolomon v1.12.0 h1:I5FEp3xSwVCcEh3F5A7dofEfhXdF/bWhQWPH+XwBFno=
github.com/klauspost/reedsolomon v1.12.0/go.mod h1:EPLZJeh4l27pUGC3aXOjheaoh1I9yut7xTURiW3LQ9Y=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/nats-io/nats.go v1.34.1 h1:syWey5xaNHZgicYBemv0nohUPPmaLteiBEUT6Q5+F/4=
github.com/nats-io/nats.go v1.34.1/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/templexxx/cpu v0.1.1 h1:isxHaxBXpYFWnk2DReuKkigaZyrjs2+9ypIdGP4h+HI=
github.com/templexxx/cpu v0.1.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xorsimd v0.4.3 h1:9AQTFHd7Bhk3dIT7Al2XeBX5DWOvsUPZCuhyAtNbHjU=
github.com/templexxx/xorsimd v0.4.3/go.mod h1:oZQcD6RFDisW2Am58dSAGwwL6rHjbzrlu25VDqfWkQg=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xtaci/kcp-go/v5 v5.6.18 h1:7oV4mc272pcnn39/13BB11Bx7hJM4ogMIEokJYVWn4g=
github.com/xtaci/kcp-go/v5 v5.6.18/go.mod h1:75S1AKYYzNUSXIv30h+jPKJYZUwqpfvLshu63nCNSOM=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4=
go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c=
go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg=
go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8=
go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js=
go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI=
go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c=
go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=


@@ -1,96 +0,0 @@
package log // import "go.uber.org/zap/buffer"
import (
"strconv"
)
const _size = 9216
type Buffer struct {
bs []byte
//mu sync.Mutex // ensures atomic writes; protects the following fields
}
func (buff *Buffer) Init(){
buff.bs = make([]byte,_size)
}
// AppendByte writes a single byte to the Buffer.
func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
func (b *Buffer) AppendBytes(v []byte) {
b.bs = append(b.bs, v...)
}
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)
}
// AppendInt appends an integer to the underlying buffer (assuming base 10).
func (b *Buffer) AppendInt(i int64) {
b.bs = strconv.AppendInt(b.bs, i, 10)
}
// AppendUint appends an unsigned integer to the underlying buffer (assuming
// base 10).
func (b *Buffer) AppendUint(i uint64) {
b.bs = strconv.AppendUint(b.bs, i, 10)
}
// AppendBool appends a bool to the underlying buffer.
func (b *Buffer) AppendBool(v bool) {
b.bs = strconv.AppendBool(b.bs, v)
}
// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
// or +/- Inf.
func (b *Buffer) AppendFloat(f float64, bitSize int) {
b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
}
// Len returns the length of the underlying byte slice.
func (b *Buffer) Len() int {
return len(b.bs)
}
// Cap returns the capacity of the underlying byte slice.
func (b *Buffer) Cap() int {
return cap(b.bs)
}
// Bytes returns a mutable reference to the underlying byte slice.
func (b *Buffer) Bytes() []byte {
return b.bs
}
// String returns a string copy of the underlying byte slice.
func (b *Buffer) String() string {
return string(b.bs)
}
// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
// backing array.
func (b *Buffer) Reset() {
b.bs = b.bs[:0]
}
// Write implements io.Writer.
func (b *Buffer) Write(bs []byte) (int, error) {
b.bs = append(b.bs, bs...)
return len(bs), nil
}
// TrimNewline trims any final "\n" byte from the end of the buffer.
func (b *Buffer) TrimNewline() {
if i := len(b.bs) - 1; i >= 0 {
if b.bs[i] == '\n' {
b.bs = b.bs[:i]
}
}
}


@@ -1,147 +0,0 @@
package log
import (
"context"
"io"
"log/slog"
"path/filepath"
"runtime"
"runtime/debug"
"sync"
)
type IOriginHandler interface {
slog.Handler
Lock()
UnLock()
}
type BaseHandler struct {
addSource bool
w io.Writer
locker sync.Mutex
}
type OriginTextHandler struct {
BaseHandler
*slog.TextHandler
}
type OriginJsonHandler struct {
BaseHandler
*slog.JSONHandler
}
func getStrLevel(level slog.Level) string{
switch level {
case LevelTrace:
return "Trace"
case LevelDebug:
return "Debug"
case LevelInfo:
return "Info"
case LevelWarning:
return "Warning"
case LevelError:
return "Error"
case LevelStack:
return "Stack"
case LevelDump:
return "Dump"
case LevelFatal:
return "Fatal"
}
return ""
}
func defaultReplaceAttr(groups []string, a slog.Attr) slog.Attr {
if a.Key == slog.LevelKey {
level := a.Value.Any().(slog.Level)
a.Value = slog.StringValue(getStrLevel(level))
}else if a.Key == slog.TimeKey && len(groups) == 0 {
a.Value = slog.StringValue(a.Value.Time().Format("2006/01/02 15:04:05"))
}else if a.Key == slog.SourceKey {
source := a.Value.Any().(*slog.Source)
source.File = filepath.Base(source.File)
}
return a
}
func NewOriginTextHandler(level slog.Level,w io.Writer,addSource bool,replaceAttr func([]string,slog.Attr) slog.Attr) slog.Handler{
var textHandler OriginTextHandler
textHandler.addSource = addSource
textHandler.w = w
textHandler.TextHandler = slog.NewTextHandler(w,&slog.HandlerOptions{
AddSource: addSource,
Level: level,
ReplaceAttr: replaceAttr,
})
return &textHandler
}
func (oh *OriginTextHandler) Handle(context context.Context, record slog.Record) error{
oh.Fill(context,&record)
oh.locker.Lock()
defer oh.locker.Unlock()
if record.Level == LevelStack || record.Level == LevelFatal{
err := oh.TextHandler.Handle(context, record)
oh.logStack(&record)
return err
}else if record.Level == LevelDump {
strDump := record.Message
record.Message = "dump info"
err := oh.TextHandler.Handle(context, record)
oh.w.Write([]byte(strDump))
return err
}
return oh.TextHandler.Handle(context, record)
}
func (b *BaseHandler) logStack(record *slog.Record){
b.w.Write(debug.Stack())
}
func (b *BaseHandler) Lock(){
b.locker.Lock()
}
func (b *BaseHandler) UnLock(){
b.locker.Unlock()
}
func NewOriginJsonHandler(level slog.Level,w io.Writer,addSource bool,replaceAttr func([]string,slog.Attr) slog.Attr) slog.Handler{
var jsonHandler OriginJsonHandler
jsonHandler.addSource = addSource
jsonHandler.w = w
jsonHandler.JSONHandler = slog.NewJSONHandler(w,&slog.HandlerOptions{
AddSource: addSource,
Level: level,
ReplaceAttr: replaceAttr,
})
return &jsonHandler
}
func (oh *OriginJsonHandler) Handle(context context.Context, record slog.Record) error{
oh.Fill(context,&record)
if record.Level == LevelStack || record.Level == LevelFatal || record.Level == LevelDump{
record.Add("stack",debug.Stack())
}
oh.locker.Lock()
defer oh.locker.Unlock()
return oh.JSONHandler.Handle(context, record)
}
func (b *BaseHandler) Fill(context context.Context, record *slog.Record) {
if b.addSource {
var pcs [1]uintptr
runtime.Callers(7, pcs[:])
record.PC = pcs[0]
}
}


@@ -1,502 +1,401 @@
package log
import (
"context"
"fmt"
"github.com/duanhf2012/origin/util/bytespool"
jsoniter "github.com/json-iterator/go"
"io"
"log/slog"
"github.com/duanhf2012/rotatelogs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
"os"
"path"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"strings"
"time"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
var OpenConsole bool
var LogSize int64
var LogChannelCap int
var isSetLogger bool
var gLogger = NewDefaultLogger()
var LogLevel zapcore.Level
var MaxSize int
var LogPath string
var LogLevel slog.Level = LevelTrace
var gLogger, _ = NewTextLogger(LevelDebug, "", "",true,LogChannelCap)
var memPool = bytespool.NewMemAreaPool()
// levels
const (
LevelTrace = slog.Level(-8)
LevelDebug = slog.LevelDebug
LevelInfo = slog.LevelInfo
LevelWarning = slog.LevelWarn
LevelError = slog.LevelError
LevelStack = slog.Level(12)
LevelDump = slog.Level(16)
LevelFatal = slog.Level(20)
)
var OpenConsole *bool
var LogChanLen int
type Logger struct {
Slogger *slog.Logger
*zap.Logger
stack bool
ioWriter IoWriter
sBuff Buffer
FileName string
Skip int
Encoder zapcore.Encoder
SugaredLogger *zap.SugaredLogger
CoreList []zapcore.Core
WriteSyncerFun []func() zapcore.WriteSyncer
}
type IoWriter struct {
outFile io.Writer // destination for output
outConsole io.Writer //os.Stdout
writeBytes int64
logChannel chan []byte
wg sync.WaitGroup
closeSig chan struct{}
lockWrite sync.Mutex
filePath string
fileprefix string
fileDay int
fileCreateTime int64 //second
}
func (iw *IoWriter) Close() error {
iw.lockWrite.Lock()
defer iw.lockWrite.Unlock()
iw.close()
return nil
}
func (iw *IoWriter) close() error {
if iw.closeSig != nil {
close(iw.closeSig)
iw.closeSig = nil
}
iw.wg.Wait()
if iw.outFile!= nil {
err := iw.outFile.(io.Closer).Close()
iw.outFile = nil
return err
}
return nil
}
func (iw *IoWriter) writeFile(p []byte) (n int, err error){
//switch log file
iw.swichFile()
if iw.outFile != nil {
n,err = iw.outFile.Write(p)
if n > 0 {
atomic.AddInt64(&iw.writeBytes,int64(n))
}
}
return 0,nil
}
func (iw *IoWriter) Write(p []byte) (n int, err error){
iw.lockWrite.Lock()
defer iw.lockWrite.Unlock()
if iw.logChannel == nil {
return iw.writeIo(p)
}
copyBuff := memPool.MakeBytes(len(p))
if copyBuff == nil {
return 0,fmt.Errorf("MakeByteSlice failed")
}
copy(copyBuff,p)
iw.logChannel <- copyBuff
return
}
func (iw *IoWriter) writeIo(p []byte) (n int, err error){
n,err = iw.writeFile(p)
if iw.outConsole != nil {
n,err = iw.outConsole.Write(p)
}
return
}
func (iw *IoWriter) setLogChannel(logChannelNum int) (err error){
iw.lockWrite.Lock()
defer iw.lockWrite.Unlock()
iw.close()
if logChannelNum == 0 {
return nil
}
//copy iw.logChannel
var logInfo []byte
logChannel := make(chan []byte,logChannelNum)
for i := 0; i < logChannelNum&&i<len(iw.logChannel); i++{
logInfo = <- iw.logChannel
logChannel <- logInfo
}
iw.logChannel = logChannel
iw.closeSig = make(chan struct{})
iw.wg.Add(1)
go iw.run()
return nil
}
func (iw *IoWriter) run(){
defer iw.wg.Done()
Loop:
for{
select {
case <- iw.closeSig:
break Loop
case logs := <-iw.logChannel:
iw.writeIo(logs)
memPool.ReleaseBytes(logs)
}
}
for len(iw.logChannel) > 0 {
logs := <-iw.logChannel
iw.writeIo(logs)
memPool.ReleaseBytes(logs)
}
}
func (iw *IoWriter) isFull() bool {
if LogSize == 0 {
return false
}
return atomic.LoadInt64(&iw.writeBytes) >= LogSize
}
func (logger *Logger) setLogChannel(logChannel int) (err error){
return logger.ioWriter.setLogChannel(logChannel)
}
func (iw *IoWriter) swichFile() error{
now := time.Now()
if iw.fileCreateTime == now.Unix() {
return nil
}
if iw.fileDay == now.Day() && iw.isFull() == false {
return nil
}
if iw.filePath != "" {
var err error
fileName := fmt.Sprintf("%s%d%02d%02d_%02d_%02d_%02d.log",
iw.fileprefix,
now.Year(),
now.Month(),
now.Day(),
now.Hour(),
now.Minute(),
now.Second())
filePath := path.Join(iw.filePath, fileName)
iw.outFile,err = os.Create(filePath)
if err != nil {
return err
}
iw.fileDay = now.Day()
iw.fileCreateTime = now.Unix()
atomic.StoreInt64(&iw.writeBytes,0)
if OpenConsole == true {
iw.outConsole = os.Stdout
}
}else{
iw.outConsole = os.Stdout
}
return nil
}
func NewTextLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (*Logger,error){
var logger Logger
logger.ioWriter.filePath = pathName
logger.ioWriter.fileprefix = filePrefix
logger.Slogger = slog.New(NewOriginTextHandler(level,&logger.ioWriter,addSource,defaultReplaceAttr))
logger.setLogChannel(logChannelCap)
err := logger.ioWriter.swichFile()
if err != nil {
return nil,err
}
return &logger,nil
}
func NewJsonLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (*Logger,error){
var logger Logger
logger.ioWriter.filePath = pathName
logger.ioWriter.fileprefix = filePrefix
logger.Slogger = slog.New(NewOriginJsonHandler(level,&logger.ioWriter,true,defaultReplaceAttr))
logger.setLogChannel(logChannelCap)
err := logger.ioWriter.swichFile()
if err != nil {
return nil,err
}
return &logger,nil
}
// It's dangerous to call this method while logging is still in progress
func (logger *Logger) Close() {
logger.ioWriter.Close()
}
func (logger *Logger) Trace(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelTrace,msg,args...)
}
func (logger *Logger) Debug(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelDebug,msg,args...)
}
func (logger *Logger) Info(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelInfo,msg,args...)
}
func (logger *Logger) Warning(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelWarning,msg,args...)
}
func (logger *Logger) Error(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelError,msg,args...)
}
func (logger *Logger) Stack(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelStack,msg,args...)
}
func (logger *Logger) Dump(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelDump,msg,args...)
}
func (logger *Logger) Fatal(msg string, args ...any) {
logger.Slogger.Log(context.Background(),LevelFatal,msg,args...)
os.Exit(1)
}
// It's dangerous to call this method while logging is still in progress
func Export(logger *Logger) {
// Set the global Logger
func SetLogger(logger *Logger) {
if logger != nil {
gLogger = logger
isSetLogger = true
}
}
func Trace(msg string, args ...any){
gLogger.Trace(msg, args...)
// Set a zap.Logger as the global logger
func SetZapLogger(zapLogger *zap.Logger) {
if zapLogger != nil {
gLogger = &Logger{}
gLogger.Logger = zapLogger
isSetLogger = true
}
}
func Debug(msg string, args ...any){
gLogger.Debug(msg,args...)
func GetLogger() *Logger {
return gLogger
}
func Info(msg string, args ...any){
gLogger.Info(msg,args...)
func (logger *Logger) SetEncoder(encoder zapcore.Encoder) {
logger.Encoder = encoder
}
func Warning(msg string, args ...any){
gLogger.Warning(msg,args...)
func (logger *Logger) SetSkip(skip int) {
logger.Skip = skip
}
func Error(msg string, args ...any){
gLogger.Error(msg,args...)
}
func Stack(msg string, args ...any){
gLogger.Stack(msg,args...)
}
func Dump(dump string, args ...any){
gLogger.Dump(dump,args...)
}
func Fatal(msg string, args ...any){
gLogger.Fatal(msg,args...)
}
func Close() {
gLogger.Close()
}
func ErrorAttr(key string,value error) slog.Attr{
if value== nil {
return slog.Attr{key, slog.StringValue("nil")}
func GetJsonEncoder() zapcore.Encoder {
encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
encoderConfig.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
enc.AppendString(t.Format("2006-01-02 15:04:05.000"))
}
return slog.Attr{key, slog.StringValue(value.Error())}
return zapcore.NewJSONEncoder(encoderConfig)
}
func String(key, value string) slog.Attr {
return slog.Attr{key, slog.StringValue(value)}
func GetTxtEncoder() zapcore.Encoder {
encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
encoderConfig.EncodeCaller = zapcore.ShortCallerEncoder
encoderConfig.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
enc.AppendString(t.Format("2006-01-02 15:04:05.000"))
}
return zapcore.NewConsoleEncoder(encoderConfig)
}
func Int(key string, value int) slog.Attr {
return slog.Attr{key, slog.Int64Value(int64(value))}
func (logger *Logger) getLogConfig() *lumberjack.Logger {
return &lumberjack.Logger{
Filename: filepath.Join(LogPath, logger.FileName),
MaxSize: MaxSize,
MaxBackups: 0,
MaxAge: 0,
Compress: false,
LocalTime: true,
}
}
func Int64(key string, value int64) slog.Attr {
return slog.Attr{key, slog.Int64Value(value)}
func NewDefaultLogger() *Logger {
logger := Logger{}
logger.Encoder = GetJsonEncoder()
core := zapcore.NewCore(logger.Encoder, zapcore.AddSync(os.Stdout), zap.InfoLevel)
logger.Logger = zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
return &logger
}
func Int32(key string, value int32) slog.Attr {
return slog.Attr{key, slog.Int64Value(int64(value))}
func (logger *Logger) SetSyncers(syncers ...func() zapcore.WriteSyncer) {
logger.WriteSyncerFun = syncers
}
func Int16(key string, value int16) slog.Attr {
return slog.Attr{key, slog.Int64Value(int64(value))}
func (logger *Logger) AppendSyncerFun(syncerFun func() zapcore.WriteSyncer) {
logger.WriteSyncerFun = append(logger.WriteSyncerFun, syncerFun)
}
func Int8(key string, value int8) slog.Attr {
return slog.Attr{key, slog.Int64Value(int64(value))}
func SetLogLevel(level zapcore.Level) {
LogLevel = level
}
func Uint(key string, value uint) slog.Attr {
return slog.Attr{key, slog.Uint64Value(uint64(value))}
func (logger *Logger) Enabled(zapcore.Level) bool {
return logger.stack
}
func Uint64(key string, v uint64) slog.Attr {
return slog.Attr{key, slog.Uint64Value(v)}
func (logger *Logger) NewLumberjackWriter() zapcore.WriteSyncer {
return zapcore.AddSync(
&lumberjack.Logger{
Filename: filepath.Join(LogPath, logger.FileName),
MaxSize: MaxSize,
MaxBackups: 0,
MaxAge: 0,
Compress: false,
LocalTime: true,
})
}
func Uint32(key string, value uint32) slog.Attr {
return slog.Attr{key, slog.Uint64Value(uint64(value))}
func (logger *Logger) NewRotatelogsWriter() zapcore.WriteSyncer {
var options []rotatelogs.Option
if MaxSize > 0 {
options = append(options, rotatelogs.WithRotateMaxSize(int64(MaxSize)))
}
if LogChanLen > 0 {
options = append(options, rotatelogs.WithChannelLen(LogChanLen))
}
options = append(options, rotatelogs.WithRotationTime(time.Hour*24))
fileName := strings.TrimRight(logger.FileName, filepath.Ext(logger.FileName))
rotateLogs, err := rotatelogs.NewRotateLogs(LogPath, "20060102/"+fileName+"_20060102_150405", options...)
if err != nil {
panic(err)
}
return zapcore.AddSync(rotateLogs)
}
func Uint16(key string, value uint16) slog.Attr {
return slog.Attr{key, slog.Uint64Value(uint64(value))}
}
func Uint8(key string, value uint8) slog.Attr {
return slog.Attr{key, slog.Uint64Value(uint64(value))}
}
func Float64(key string, v float64) slog.Attr {
return slog.Attr{key, slog.Float64Value(v)}
}
func Bool(key string, v bool) slog.Attr {
return slog.Attr{key, slog.BoolValue(v)}
}
func Time(key string, v time.Time) slog.Attr {
return slog.Attr{key, slog.TimeValue(v)}
}
func Duration(key string, v time.Duration) slog.Attr {
return slog.Attr{key, slog.DurationValue(v)}
}
func Any(key string, value any) slog.Attr {
return slog.Attr{key, slog.AnyValue(value)}
}
func Group(key string, args ...any) slog.Attr {
return slog.Group(key, args...)
}
func (logger *Logger) doSPrintf(level slog.Level,a []interface{}) {
if logger.Slogger.Enabled(context.Background(),level) == false{
func (logger *Logger) Init() {
if isSetLogger {
return
}
logger.Slogger.Handler().(IOriginHandler).Lock()
defer logger.Slogger.Handler().(IOriginHandler).UnLock()
logger.sBuff.Reset()
logger.formatHeader(&logger.sBuff,level,3)
for _,s := range a {
logger.sBuff.AppendString(slog.AnyValue(s).String())
var syncerList []zapcore.WriteSyncer
if logger.WriteSyncerFun == nil {
syncerList = append(syncerList, logger.NewRotatelogsWriter())
} else {
for _, syncer := range logger.WriteSyncerFun {
syncerList = append(syncerList, syncer())
}
}
logger.sBuff.AppendString("\"\n")
logger.ioWriter.Write([]byte(logger.sBuff.Bytes()))
}
func (logger *Logger) STrace(a ...interface{}) {
logger.doSPrintf(LevelTrace,a)
}
func (logger *Logger) SDebug(a ...interface{}) {
logger.doSPrintf(LevelDebug,a)
}
func (logger *Logger) SInfo(a ...interface{}) {
logger.doSPrintf(LevelInfo,a)
}
func (logger *Logger) SWarning(a ...interface{}) {
logger.doSPrintf(LevelWarning,a)
}
func (logger *Logger) SError(a ...interface{}) {
logger.doSPrintf(LevelError,a)
}
func STrace(a ...interface{}) {
gLogger.doSPrintf(LevelTrace,a)
}
func SDebug(a ...interface{}) {
gLogger.doSPrintf(LevelDebug,a)
}
func SInfo(a ...interface{}) {
gLogger.doSPrintf(LevelInfo,a)
}
func SWarning(a ...interface{}) {
gLogger.doSPrintf(LevelWarning,a)
}
func SError(a ...interface{}) {
gLogger.doSPrintf(LevelError,a)
}
func (logger *Logger) formatHeader(buf *Buffer,level slog.Level,calldepth int) {
t := time.Now()
var file string
var line int
// Release lock while getting caller info - it's expensive.
var ok bool
_, file, line, ok = runtime.Caller(calldepth)
if !ok {
file = "???"
line = 0
var coreList []zapcore.Core
if OpenConsole == nil || *OpenConsole {
syncerList = append(syncerList, zapcore.AddSync(os.Stdout))
}
file = filepath.Base(file)
buf.AppendString("time=\"")
buf.AppendString(t.Format("2006/01/02 15:04:05"))
buf.AppendString("\"")
logger.sBuff.AppendString(" level=")
logger.sBuff.AppendString(getStrLevel(level))
logger.sBuff.AppendString(" source=")
for _, writer := range syncerList {
core := zapcore.NewCore(logger.Encoder, writer, LogLevel)
coreList = append(coreList, core)
}
buf.AppendString(file)
buf.AppendByte(':')
buf.AppendInt(int64(line))
buf.AppendString(" msg=\"")
}
if logger.CoreList != nil {
coreList = append(coreList, logger.CoreList...)
}
core := zapcore.NewTee(coreList...)
logger.Logger = zap.New(core, zap.AddCaller(), zap.AddStacktrace(logger), zap.AddCallerSkip(1+logger.Skip))
logger.SugaredLogger = logger.Logger.Sugar()
}
func (logger *Logger) Debug(msg string, fields ...zap.Field) {
logger.Logger.Debug(msg, fields...)
}
func (logger *Logger) Info(msg string, fields ...zap.Field) {
logger.Logger.Info(msg, fields...)
}
func (logger *Logger) Warn(msg string, fields ...zap.Field) {
logger.Logger.Warn(msg, fields...)
}
func (logger *Logger) Error(msg string, fields ...zap.Field) {
logger.Logger.Error(msg, fields...)
}
func (logger *Logger) StackError(msg string, args ...zap.Field) {
logger.stack = true
logger.Logger.Log(zapcore.ErrorLevel, msg, args...)
logger.stack = false
}
func (logger *Logger) Fatal(msg string, fields ...zap.Field) {
gLogger.stack = true
logger.Logger.Fatal(msg, fields...)
gLogger.stack = false
}
func Debug(msg string, fields ...zap.Field) {
gLogger.Logger.Debug(msg, fields...)
}
func Info(msg string, fields ...zap.Field) {
gLogger.Logger.Info(msg, fields...)
}
func Warn(msg string, fields ...zap.Field) {
gLogger.Logger.Warn(msg, fields...)
}
func Error(msg string, fields ...zap.Field) {
gLogger.Logger.Error(msg, fields...)
}
func StackError(msg string, fields ...zap.Field) {
gLogger.stack = true
gLogger.Logger.Error(msg, fields...)
gLogger.stack = false
}
func Fatal(msg string, fields ...zap.Field) {
gLogger.stack = true
gLogger.Logger.Fatal(msg, fields...)
gLogger.stack = false
}
func Debugf(template string, args ...any) {
gLogger.SugaredLogger.Debugf(template, args...)
}
func Infof(template string, args ...any) {
gLogger.SugaredLogger.Infof(template, args...)
}
func Warnf(template string, args ...any) {
gLogger.SugaredLogger.Warnf(template, args...)
}
func Errorf(template string, args ...any) {
gLogger.SugaredLogger.Errorf(template, args...)
}
func StackErrorf(template string, args ...any) {
gLogger.stack = true
gLogger.SugaredLogger.Errorf(template, args...)
gLogger.stack = false
}
func Fatalf(template string, args ...any) {
gLogger.SugaredLogger.Fatalf(template, args...)
}
func (logger *Logger) SDebug(args ...interface{}) {
logger.SugaredLogger.Debugln(args...)
}
func (logger *Logger) SInfo(args ...interface{}) {
logger.SugaredLogger.Infoln(args...)
}
func (logger *Logger) SWarn(args ...interface{}) {
logger.SugaredLogger.Warnln(args...)
}
func (logger *Logger) SError(args ...interface{}) {
logger.SugaredLogger.Errorln(args...)
}
func (logger *Logger) SStackError(args ...interface{}) {
gLogger.stack = true
logger.SugaredLogger.Errorln(args...)
gLogger.stack = false
}
func (logger *Logger) SFatal(args ...interface{}) {
gLogger.stack = true
logger.SugaredLogger.Fatalln(args...)
gLogger.stack = false
}
func SDebug(args ...interface{}) {
gLogger.SugaredLogger.Debugln(args...)
}
func SInfo(args ...interface{}) {
gLogger.SugaredLogger.Infoln(args...)
}
func SWarn(args ...interface{}) {
gLogger.SugaredLogger.Warnln(args...)
}
func SError(args ...interface{}) {
gLogger.SugaredLogger.Errorln(args...)
}
func SStackError(args ...interface{}) {
gLogger.stack = true
gLogger.SugaredLogger.Errorln(args...)
gLogger.stack = false
}
func SFatal(args ...interface{}) {
gLogger.stack = true
gLogger.SugaredLogger.Fatalln(args...)
gLogger.stack = false
}
func ErrorField(key string, value error) zap.Field {
if value == nil {
return zap.String(key, "nil")
}
return zap.String(key, value.Error())
}
func String(key, value string) zap.Field {
return zap.String(key, value)
}
func Int(key string, value int) zap.Field {
return zap.Int(key, value)
}
func Int64(key string, value int64) zap.Field {
return zap.Int64(key, value)
}
func Int32(key string, value int32) zap.Field {
return zap.Int32(key, value)
}
func Int16(key string, value int16) zap.Field {
return zap.Int16(key, value)
}
func Int8(key string, value int8) zap.Field {
return zap.Int8(key, value)
}
func Uint(key string, value uint) zap.Field {
return zap.Uint(key, value)
}
func Uint64(key string, v uint64) zap.Field {
return zap.Uint64(key, v)
}
func Uint32(key string, value uint32) zap.Field {
return zap.Uint32(key, value)
}
func Uint16(key string, value uint16) zap.Field {
return zap.Uint16(key, value)
}
func Uint8(key string, value uint8) zap.Field {
return zap.Uint8(key, value)
}
func Float64(key string, v float64) zap.Field {
return zap.Float64(key, v)
}
func Bool(key string, v bool) zap.Field {
return zap.Bool(key, v)
}
func Bools(key string, v []bool) zap.Field {
return zap.Bools(key, v)
}
func Time(key string, v time.Time) zap.Field {
return zap.Time(key, v)
}
func Duration(key string, v time.Duration) zap.Field {
return zap.Duration(key, v)
}
func Durations(key string, v []time.Duration) zap.Field {
return zap.Durations(key, v)
}
func Any(key string, value any) zap.Field {
return zap.Any(key, value)
}
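
The refactored log package exposes both zap-style structured helpers and printf/println-style sugared variants. A minimal usage sketch, assuming the package-level default logger (gLogger) has already been initialized by the framework:

package main

import "github.com/duanhf2012/origin/v2/log"

func main() {
	// structured, zap-style fields built with the helpers above
	log.Info("player login", log.String("account", "a1"), log.Int("level", 10))
	log.Error("load config failed", log.ErrorField("error", nil)) // ErrorField renders a nil error as "nil"

	// printf-style sugared helpers
	log.Infof("player %s login, level %d", "a1", 10)

	// Println-style helpers
	log.SInfo("node started, id =", "Node_1")
}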


@@ -1,7 +1,12 @@
package network
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"net"
"sync"
"sync/atomic"
"time"
)
type Conn interface {
@@ -13,3 +18,158 @@ type Conn interface {
Destroy()
ReleaseReadMsg(byteBuff []byte)
}
type ConnSet map[net.Conn]struct{}
type NetConn struct {
sync.Mutex
conn net.Conn
writeChan chan []byte
closeFlag int32
msgParser *MsgParser
}
func freeChannel(conn *NetConn) {
for len(conn.writeChan) > 0 {
byteBuff := <-conn.writeChan
if byteBuff != nil {
conn.ReleaseReadMsg(byteBuff)
}
}
}
func newNetConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser, writeDeadline time.Duration) *NetConn {
netConn := new(NetConn)
netConn.conn = conn
netConn.writeChan = make(chan []byte, pendingWriteNum)
netConn.msgParser = msgParser
go func() {
for b := range netConn.writeChan {
if b == nil {
break
}
conn.SetWriteDeadline(time.Now().Add(writeDeadline))
_, err := conn.Write(b)
netConn.msgParser.ReleaseBytes(b)
if err != nil {
break
}
}
conn.Close()
netConn.Lock()
freeChannel(netConn)
atomic.StoreInt32(&netConn.closeFlag, 1)
netConn.Unlock()
}()
return netConn
}
func (netConn *NetConn) doDestroy() {
netConn.conn.Close()
if atomic.LoadInt32(&netConn.closeFlag) == 0 {
close(netConn.writeChan)
atomic.StoreInt32(&netConn.closeFlag, 1)
}
}
func (netConn *NetConn) Destroy() {
netConn.Lock()
defer netConn.Unlock()
netConn.doDestroy()
}
func (netConn *NetConn) Close() {
netConn.Lock()
defer netConn.Unlock()
if atomic.LoadInt32(&netConn.closeFlag) == 1 {
return
}
netConn.doWrite(nil)
atomic.StoreInt32(&netConn.closeFlag, 1)
}
func (netConn *NetConn) GetRemoteIp() string {
return netConn.conn.RemoteAddr().String()
}
func (netConn *NetConn) doWrite(b []byte) error {
if len(netConn.writeChan) == cap(netConn.writeChan) {
netConn.ReleaseReadMsg(b)
log.Error("close conn: channel full")
netConn.doDestroy()
return errors.New("close conn: channel full")
}
netConn.writeChan <- b
return nil
}
// b must not be modified by the others goroutines
func (netConn *NetConn) Write(b []byte) (int,error) {
netConn.Lock()
defer netConn.Unlock()
if atomic.LoadInt32(&netConn.closeFlag) == 1 || b == nil {
netConn.ReleaseReadMsg(b)
return 0,errors.New("conn is close")
}
return len(b),netConn.doWrite(b)
}
func (netConn *NetConn) Read(b []byte) (int, error) {
return netConn.conn.Read(b)
}
func (netConn *NetConn) LocalAddr() net.Addr {
return netConn.conn.LocalAddr()
}
func (netConn *NetConn) RemoteAddr() net.Addr {
return netConn.conn.RemoteAddr()
}
func (netConn *NetConn) ReadMsg() ([]byte, error) {
return netConn.msgParser.Read(netConn)
}
func (netConn *NetConn) GetRecyclerReaderBytes() func(data []byte) {
return netConn.msgParser.GetRecyclerReaderBytes()
}
func (netConn *NetConn) ReleaseReadMsg(byteBuff []byte) {
netConn.msgParser.ReleaseBytes(byteBuff)
}
func (netConn *NetConn) WriteMsg(args ...[]byte) error {
if atomic.LoadInt32(&netConn.closeFlag) == 1 {
return errors.New("conn is close")
}
return netConn.msgParser.Write(netConn, args...)
}
func (netConn *NetConn) WriteRawMsg(args []byte) error {
if atomic.LoadInt32(&netConn.closeFlag) == 1 {
return errors.New("conn is close")
}
_,err:= netConn.Write(args)
return err
}
func (netConn *NetConn) IsConnected() bool {
return atomic.LoadInt32(&netConn.closeFlag) == 0
}
func (netConn *NetConn) SetReadDeadline(d time.Duration) {
netConn.conn.SetReadDeadline(time.Now().Add(d))
}
func (netConn *NetConn) SetWriteDeadline(d time.Duration) {
netConn.conn.SetWriteDeadline(time.Now().Add(d))
}


@@ -3,25 +3,25 @@ package network
import (
"crypto/tls"
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"net/http"
"time"
)
var DefaultMaxHeaderBytes int = 1<<20
var DefaultMaxHeaderBytes = 1 << 20
type CAFile struct {
CertFile string
Keyfile string
Keyfile string
}
type HttpServer struct {
listenAddr string
listenAddr string
readTimeout time.Duration
writeTimeout time.Duration
handler http.Handler
caFileList []CAFile
handler http.Handler
caFileList []CAFile
httpServer *http.Server
}
@@ -39,7 +39,7 @@ func (slf *HttpServer) Start() {
func (slf *HttpServer) startListen() error {
if slf.httpServer != nil {
return errors.New("Duplicate start not allowed")
return errors.New("duplicate start not allowed")
}
var tlsCaList []tls.Certificate
@@ -47,7 +47,7 @@ func (slf *HttpServer) startListen() error {
for _, caFile := range slf.caFileList {
cer, err := tls.LoadX509KeyPair(caFile.CertFile, caFile.Keyfile)
if err != nil {
log.Fatal("Load CA file is fail",log.String("error",err.Error()),log.String("certFile",caFile.CertFile),log.String("keyFile",caFile.Keyfile))
log.Fatal("Load CA file is fail", log.String("error", err.Error()), log.String("certFile", caFile.CertFile), log.String("keyFile", caFile.Keyfile))
return err
}
tlsCaList = append(tlsCaList, cer)
@@ -74,14 +74,13 @@ func (slf *HttpServer) startListen() error {
}
if err != nil {
log.Fatal("Listen failure",log.String("error",err.Error()),log.String("addr:",slf.listenAddr))
log.Fatal("Listen failure", log.String("error", err.Error()), log.String("addr:", slf.listenAddr))
return err
}
return nil
}
func (slf *HttpServer) SetCAFile(caFile []CAFile) {
slf.caFileList = caFile
}

161
network/kcp_client.go Normal file

@@ -0,0 +1,161 @@
package network
import (
"github.com/duanhf2012/origin/v2/log"
"github.com/xtaci/kcp-go/v5"
"net"
"sync"
"time"
)
type KCPClient struct {
sync.Mutex
Addr string
ConnNum int
ConnectInterval time.Duration
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
AutoReconnect bool
NewAgent func(conn *NetConn) Agent
cons ConnSet
wg sync.WaitGroup
closeFlag bool
// msg parser
MsgParser
}
func (client *KCPClient) Start() {
client.init()
for i := 0; i < client.ConnNum; i++ {
client.wg.Add(1)
go client.connect()
}
}
func (client *KCPClient) init() {
client.Lock()
defer client.Unlock()
if client.ConnNum <= 0 {
client.ConnNum = 1
log.Info("invalid ConnNum", log.Int("reset", client.ConnNum))
}
if client.ConnectInterval <= 0 {
client.ConnectInterval = 3 * time.Second
log.Info("invalid ConnectInterval", log.Duration("reset", client.ConnectInterval))
}
if client.PendingWriteNum <= 0 {
client.PendingWriteNum = 1000
log.Info("invalid PendingWriteNum", log.Int("reset", client.PendingWriteNum))
}
if client.ReadDeadline == 0 {
client.ReadDeadline = 15 * time.Second
log.Info("invalid ReadDeadline", log.Int64("reset", int64(client.ReadDeadline.Seconds())))
}
if client.WriteDeadline == 0 {
client.WriteDeadline = 15 * time.Second
log.Info("invalid WriteDeadline", log.Int64("reset", int64(client.WriteDeadline.Seconds())))
}
if client.NewAgent == nil {
log.Fatal("NewAgent must not be nil")
}
if client.cons != nil {
log.Fatal("client is running")
}
if client.MinMsgLen == 0 {
client.MinMsgLen = Default_MinMsgLen
}
if client.MaxMsgLen == 0 {
client.MaxMsgLen = Default_MaxMsgLen
}
if client.LenMsgLen == 0 {
client.LenMsgLen = Default_LenMsgLen
}
maxMsgLen := client.MsgParser.getMaxMsgLen()
if client.MaxMsgLen > maxMsgLen {
client.MaxMsgLen = maxMsgLen
log.Info("invalid MaxMsgLen", log.Uint32("reset", maxMsgLen))
}
client.cons = make(ConnSet)
client.closeFlag = false
client.MsgParser.Init()
}
func (client *KCPClient) GetCloseFlag() bool {
client.Lock()
defer client.Unlock()
return client.closeFlag
}
func (client *KCPClient) dial() net.Conn {
for {
conn, err := kcp.DialWithOptions(client.Addr, nil, 10, 3)
if client.closeFlag {
return conn
} else if err == nil && conn != nil {
conn.SetNoDelay(1, 10, 2, 1)
conn.SetDSCP(46)
conn.SetStreamMode(true)
conn.SetWindowSize(1024, 1024)
return conn
}
log.Warn("connect error ", log.String("error", err.Error()), log.String("Addr", client.Addr))
time.Sleep(client.ConnectInterval)
continue
}
}
func (client *KCPClient) connect() {
defer client.wg.Done()
reconnect:
conn := client.dial()
if conn == nil {
return
}
client.Lock()
if client.closeFlag {
client.Unlock()
conn.Close()
return
}
client.cons[conn] = struct{}{}
client.Unlock()
netConn := newNetConn(conn, client.PendingWriteNum, &client.MsgParser, client.WriteDeadline)
agent := client.NewAgent(netConn)
agent.Run()
// cleanup
netConn.Close()
client.Lock()
delete(client.cons, conn)
client.Unlock()
agent.OnClose()
if client.AutoReconnect {
time.Sleep(client.ConnectInterval)
goto reconnect
}
}
func (client *KCPClient) Close(waitDone bool) {
client.Lock()
client.closeFlag = true
for conn := range client.cons {
conn.Close()
}
client.cons = nil
client.Unlock()
if waitDone == true {
client.wg.Wait()
}
}
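
A minimal sketch of wiring up the new KCPClient. The Agent type is assumed to expose exactly the Run/OnClose methods invoked in connect() above; fields left at their zero value fall back to the defaults applied in init().

package main

import "github.com/duanhf2012/origin/v2/network"

type clientAgent struct{ conn *network.NetConn }

func (a *clientAgent) Run() {
	for {
		data, err := a.conn.ReadMsg()
		if err != nil {
			return // connection closed; AutoReconnect will dial again
		}
		// ... handle data ...
		a.conn.ReleaseReadMsg(data) // read buffers come from the parser's pool
	}
}

func (a *clientAgent) OnClose() {}

func main() {
	client := &network.KCPClient{
		Addr:          "127.0.0.1:9001",
		ConnNum:       1,
		AutoReconnect: true,
		NewAgent:      func(conn *network.NetConn) network.Agent { return &clientAgent{conn: conn} },
	}
	client.Start()
	select {} // keep the process alive
}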

257
network/kcp_server.go Normal file

@@ -0,0 +1,257 @@
package network
import (
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network/processor"
"github.com/xtaci/kcp-go/v5"
"sync"
"time"
)
type KCPServer struct {
NewAgent func(Conn) Agent
kcpCfg *KcpCfg
blockCrypt kcp.BlockCrypt
process processor.IRawProcessor
msgParser MsgParser
conns ConnSet
mutexConns sync.Mutex
wgLn sync.WaitGroup
wgConns sync.WaitGroup
listener *kcp.Listener
}
/*
NoDelayCfg
Normal mode: ikcp_nodelay(kcp, 0, 40, 0, 0);
Turbo mode:  ikcp_nodelay(kcp, 1, 10, 2, 1);
*/
type NoDelayCfg struct {
NoDelay int // enable nodelay mode: 0 = off, 1 = on
IntervalMill int // internal protocol update interval in milliseconds, e.g. 10ms or 20ms
Resend int // fast resend: 0 (default) disabled; 2 means resend directly after being skipped by 2 ACKs
CongestionControl int // disable congestion control: 0 (default) = keep it, 1 = disable it
}
const (
DefaultNoDelay = 1
DefaultIntervalMill = 10
DefaultResend = 2
DefaultCongestionControl = 1
DefaultMtu = 1400
DefaultSndWndSize = 4096
DefaultRcvWndSize = 4096
DefaultStreamMode = true
DefaultDSCP = 46
DefaultDataShards = 10
DefaultParityShards = 0
DefaultReadDeadlineMill = 15 * time.Second
DefaultWriteDeadlineMill = 15 * time.Second
DefaultMaxConnNum = 20000
)
type KcpCfg struct {
ListenAddr string // listen address
MaxConnNum int // maximum number of connections
NoDelay *NoDelayCfg
Mtu *int // MTU size
SndWndSize *int // send window size, defaults when nil
RcvWndSize *int // receive window size, defaults when nil
ReadDeadlineMill *time.Duration // read timeout in milliseconds
WriteDeadlineMill *time.Duration // write timeout in milliseconds
StreamMode *bool // enable stream mode, default true
DSCP *int // differentiated services code point, default 46
ReadBuffSize *int // read buffer size
WriteBuffSize *int // write buffer size
// data shards and parity shards for FEC (forward error correction), default 10,0
DataShards *int
ParityShards *int
// packet body settings
LittleEndian bool // use little-endian byte order
LenMsgLen int // bytes used by the length header; only 1, 2 or 4 are allowed (4 bytes allows messages up to math.MaxUint32, i.e. 4GB)
MinMsgLen uint32 // minimum message length
MaxMsgLen uint32 // maximum message length; longer messages are treated as invalid and the connection is closed
PendingWriteNum int // maximum number of messages queued in the write channel
}
func (kp *KCPServer) Init(kcpCfg *KcpCfg) {
kp.kcpCfg = kcpCfg
kp.msgParser.Init()
kp.msgParser.LenMsgLen = kp.kcpCfg.LenMsgLen
kp.msgParser.MaxMsgLen = kp.kcpCfg.MaxMsgLen
kp.msgParser.MinMsgLen = kp.kcpCfg.MinMsgLen
kp.msgParser.LittleEndian = kp.kcpCfg.LittleEndian
// setting default noDelay
if kp.kcpCfg.NoDelay == nil {
var noDelay NoDelayCfg
noDelay.NoDelay = DefaultNoDelay
noDelay.IntervalMill = DefaultIntervalMill
noDelay.Resend = DefaultResend
noDelay.CongestionControl = DefaultCongestionControl
kp.kcpCfg.NoDelay = &noDelay
}
if kp.kcpCfg.Mtu == nil {
mtu := DefaultMtu
kp.kcpCfg.Mtu = &mtu
}
if kp.kcpCfg.SndWndSize == nil {
sndWndSize := DefaultSndWndSize
kp.kcpCfg.SndWndSize = &sndWndSize
}
if kp.kcpCfg.RcvWndSize == nil {
rcvWndSize := DefaultRcvWndSize
kp.kcpCfg.RcvWndSize = &rcvWndSize
}
if kp.kcpCfg.ReadDeadlineMill == nil {
readDeadlineMill := DefaultReadDeadlineMill
kp.kcpCfg.ReadDeadlineMill = &readDeadlineMill
} else {
*kp.kcpCfg.ReadDeadlineMill *= time.Millisecond
}
if kp.kcpCfg.WriteDeadlineMill == nil {
writeDeadlineMill := DefaultWriteDeadlineMill
kp.kcpCfg.WriteDeadlineMill = &writeDeadlineMill
} else {
*kp.kcpCfg.WriteDeadlineMill *= time.Millisecond
}
if kp.kcpCfg.StreamMode == nil {
streamMode := DefaultStreamMode
kp.kcpCfg.StreamMode = &streamMode
}
if kp.kcpCfg.DataShards == nil {
dataShards := DefaultDataShards
kp.kcpCfg.DataShards = &dataShards
}
if kp.kcpCfg.ParityShards == nil {
parityShards := DefaultParityShards
kp.kcpCfg.ParityShards = &parityShards
}
if kp.kcpCfg.DSCP == nil {
dss := DefaultDSCP
kp.kcpCfg.DSCP = &dss
}
if kp.kcpCfg.MaxConnNum == 0 {
kp.kcpCfg.MaxConnNum = DefaultMaxConnNum
}
kp.conns = make(ConnSet, 2048)
kp.msgParser.Init()
return
}
func (kp *KCPServer) Start() error {
listener, err := kcp.ListenWithOptions(kp.kcpCfg.ListenAddr, kp.blockCrypt, *kp.kcpCfg.DataShards, *kp.kcpCfg.ParityShards)
if err != nil {
return err
}
if kp.kcpCfg.ReadBuffSize != nil {
err = listener.SetReadBuffer(*kp.kcpCfg.ReadBuffSize)
if err != nil {
return err
}
}
if kp.kcpCfg.WriteBuffSize != nil {
err = listener.SetWriteBuffer(*kp.kcpCfg.WriteBuffSize)
if err != nil {
return err
}
}
err = listener.SetDSCP(*kp.kcpCfg.DSCP)
if err != nil {
return err
}
kp.listener = listener
kp.wgLn.Add(1)
go func() {
defer kp.wgLn.Done()
for kp.run(listener) {
}
}()
return nil
}
func (kp *KCPServer) initSession(session *kcp.UDPSession) {
session.SetStreamMode(*kp.kcpCfg.StreamMode)
session.SetWindowSize(*kp.kcpCfg.SndWndSize, *kp.kcpCfg.RcvWndSize)
session.SetNoDelay(kp.kcpCfg.NoDelay.NoDelay, kp.kcpCfg.NoDelay.IntervalMill, kp.kcpCfg.NoDelay.Resend, kp.kcpCfg.NoDelay.CongestionControl)
session.SetDSCP(*kp.kcpCfg.DSCP)
session.SetMtu(*kp.kcpCfg.Mtu)
session.SetACKNoDelay(false)
//session.SetWriteDeadline(time.Now().Add(*kp.kcpCfg.WriteDeadlineMill))
}
func (kp *KCPServer) run(listener *kcp.Listener) bool {
conn, err := listener.Accept()
if err != nil {
log.Error("accept error", log.String("ListenAddr", kp.kcpCfg.ListenAddr), log.ErrorField("err", err))
return false
}
kp.mutexConns.Lock()
if len(kp.conns) >= kp.kcpCfg.MaxConnNum {
kp.mutexConns.Unlock()
conn.Close()
log.Warn("too many connections")
return true
}
kp.conns[conn] = struct{}{}
kp.mutexConns.Unlock()
if kp.kcpCfg.ReadBuffSize != nil {
conn.(*kcp.UDPSession).SetReadBuffer(*kp.kcpCfg.ReadBuffSize)
}
if kp.kcpCfg.WriteBuffSize != nil {
conn.(*kcp.UDPSession).SetWriteBuffer(*kp.kcpCfg.WriteBuffSize)
}
kp.initSession(conn.(*kcp.UDPSession))
netConn := newNetConn(conn, kp.kcpCfg.PendingWriteNum, &kp.msgParser, *kp.kcpCfg.WriteDeadlineMill)
agent := kp.NewAgent(netConn)
kp.wgConns.Add(1)
go func() {
agent.Run()
// cleanup
conn.Close()
kp.mutexConns.Lock()
delete(kp.conns, conn)
kp.mutexConns.Unlock()
agent.OnClose()
kp.wgConns.Done()
}()
return true
}
func (kp *KCPServer) Close() {
kp.listener.Close()
kp.wgLn.Wait()
kp.mutexConns.Lock()
for conn := range kp.conns {
conn.Close()
}
kp.conns = nil
kp.mutexConns.Unlock()
kp.wgConns.Wait()
}
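
A hedged sketch of bringing up the KCP server with an explicit KcpCfg: every pointer field left nil falls back to the Default* constants above. The Agent shape (Run/OnClose) is assumed from the accept loop.

package main

import "github.com/duanhf2012/origin/v2/network"

type kcpAgent struct{ conn network.Conn }

func (a *kcpAgent) Run()     { /* read loop over a.conn */ }
func (a *kcpAgent) OnClose() {}

func main() {
	cfg := &network.KcpCfg{
		ListenAddr:      "0.0.0.0:9002",
		MaxConnNum:      10000,
		LenMsgLen:       2, // 2-byte length header
		MinMsgLen:       2,
		MaxMsgLen:       65535,
		PendingWriteNum: 1000,
		// NoDelay, Mtu, window sizes, deadlines and FEC shards stay nil -> Default* values
	}

	var server network.KCPServer
	server.NewAgent = func(conn network.Conn) network.Agent { return &kcpAgent{conn: conn} }
	server.Init(cfg)
	if err := server.Start(); err != nil {
		panic(err)
	}
	select {}
}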


@@ -3,8 +3,8 @@ package processor
import (
"encoding/json"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/util/bytespool"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/bytespool"
"reflect"
)
@@ -13,29 +13,29 @@ type MessageJsonInfo struct {
msgHandler MessageJsonHandler
}
type MessageJsonHandler func(clientId uint64,msg interface{})
type ConnectJsonHandler func(clientId uint64)
type UnknownMessageJsonHandler func(clientId uint64,msg []byte)
type MessageJsonHandler func(clientId string, msg interface{})
type ConnectJsonHandler func(clientId string)
type UnknownMessageJsonHandler func(clientId string, msg []byte)
type JsonProcessor struct {
mapMsg map[uint16]MessageJsonInfo
mapMsg map[uint16]MessageJsonInfo
LittleEndian bool
unknownMessageHandler UnknownMessageJsonHandler
connectHandler ConnectJsonHandler
disconnectHandler ConnectJsonHandler
bytespool.IBytesMempool
connectHandler ConnectJsonHandler
disconnectHandler ConnectJsonHandler
bytespool.IBytesMemPool
}
type JsonPackInfo struct {
typ uint16
msg interface{}
typ uint16
msg interface{}
rawMsg []byte
}
func NewJsonProcessor() *JsonProcessor {
processor := &JsonProcessor{mapMsg:map[uint16]MessageJsonInfo{}}
processor.IBytesMempool = bytespool.NewMemAreaPool()
processor := &JsonProcessor{mapMsg: map[uint16]MessageJsonInfo{}}
processor.IBytesMemPool = bytespool.NewMemAreaPool()
return processor
}
@@ -44,97 +44,101 @@ func (jsonProcessor *JsonProcessor) SetByteOrder(littleEndian bool) {
jsonProcessor.LittleEndian = littleEndian
}
// must goroutine safe
func (jsonProcessor *JsonProcessor ) MsgRoute(clientId uint64,msg interface{}) error{
// MsgRoute must goroutine safe
func (jsonProcessor *JsonProcessor) MsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) error {
pPackInfo := msg.(*JsonPackInfo)
v,ok := jsonProcessor.mapMsg[pPackInfo.typ]
defer recyclerReaderBytes(pPackInfo.rawMsg)
v, ok := jsonProcessor.mapMsg[pPackInfo.typ]
if ok == false {
return fmt.Errorf("cannot find msgtype %d is register!",pPackInfo.typ)
return fmt.Errorf("cannot find msgtype %d is register", pPackInfo.typ)
}
v.msgHandler(clientId,pPackInfo.msg)
v.msgHandler(clientId, pPackInfo.msg)
return nil
}
func (jsonProcessor *JsonProcessor) Unmarshal(clientId uint64,data []byte) (interface{}, error) {
typeStruct := struct {Type int `json:"typ"`}{}
defer jsonProcessor.ReleaseBytes(data)
func (jsonProcessor *JsonProcessor) Unmarshal(clientId string, data []byte) (interface{}, error) {
typeStruct := struct {
Type int `json:"typ"`
}{}
err := json.Unmarshal(data, &typeStruct)
if err != nil {
return nil, err
}
msgType := uint16(typeStruct.Type)
info,ok := jsonProcessor.mapMsg[msgType]
info, ok := jsonProcessor.mapMsg[msgType]
if ok == false {
return nil,fmt.Errorf("Cannot find register %d msgType!",msgType)
return nil, fmt.Errorf("cannot find register %d msgType", msgType)
}
msgData := reflect.New(info.msgType.Elem()).Interface()
err = json.Unmarshal(data, msgData)
if err != nil {
return nil,err
return nil, err
}
return &JsonPackInfo{typ:msgType,msg:msgData},nil
return &JsonPackInfo{typ: msgType, msg: msgData, rawMsg: data}, nil
}
func (jsonProcessor *JsonProcessor) Marshal(clientId uint64,msg interface{}) ([]byte, error) {
rawMsg,err := json.Marshal(msg)
func (jsonProcessor *JsonProcessor) Marshal(clientId string, msg interface{}) ([]byte, error) {
rawMsg, err := json.Marshal(msg)
if err != nil {
return nil,err
return nil, err
}
return rawMsg,nil
return rawMsg, nil
}
func (jsonProcessor *JsonProcessor) Register(msgtype uint16,msg interface{},handle MessageJsonHandler) {
func (jsonProcessor *JsonProcessor) Register(msgType uint16, msg interface{}, handle MessageJsonHandler) {
var info MessageJsonInfo
info.msgType = reflect.TypeOf(msg)
info.msgHandler = handle
jsonProcessor.mapMsg[msgtype] = info
jsonProcessor.mapMsg[msgType] = info
}
func (jsonProcessor *JsonProcessor) MakeMsg(msgType uint16,msg interface{}) *JsonPackInfo {
return &JsonPackInfo{typ:msgType,msg:msg}
func (jsonProcessor *JsonProcessor) MakeMsg(msgType uint16, msg interface{}) *JsonPackInfo {
return &JsonPackInfo{typ: msgType, msg: msg}
}
func (jsonProcessor *JsonProcessor) MakeRawMsg(msgType uint16,msg []byte) *JsonPackInfo {
return &JsonPackInfo{typ:msgType,rawMsg:msg}
func (jsonProcessor *JsonProcessor) MakeRawMsg(msgType uint16, msg []byte) *JsonPackInfo {
return &JsonPackInfo{typ: msgType, rawMsg: msg}
}
func (jsonProcessor *JsonProcessor) UnknownMsgRoute(clientId uint64,msg interface{}){
if jsonProcessor.unknownMessageHandler==nil {
log.Debug("Unknown message",log.Uint64("clientId",clientId))
func (jsonProcessor *JsonProcessor) UnknownMsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) {
defer recyclerReaderBytes(msg.([]byte))
if jsonProcessor.unknownMessageHandler == nil {
log.Debug("Unknown message", log.String("clientId", clientId))
return
}
jsonProcessor.unknownMessageHandler(clientId,msg.([]byte))
jsonProcessor.unknownMessageHandler(clientId, msg.([]byte))
}
func (jsonProcessor *JsonProcessor) ConnectedRoute(clientId uint64){
func (jsonProcessor *JsonProcessor) ConnectedRoute(clientId string) {
if jsonProcessor.connectHandler != nil {
jsonProcessor.connectHandler(clientId)
}
}
func (jsonProcessor *JsonProcessor) DisConnectedRoute(clientId uint64){
func (jsonProcessor *JsonProcessor) DisConnectedRoute(clientId string) {
if jsonProcessor.disconnectHandler != nil {
jsonProcessor.disconnectHandler(clientId)
}
}
func (jsonProcessor *JsonProcessor) RegisterUnknownMsg(unknownMessageHandler UnknownMessageJsonHandler){
func (jsonProcessor *JsonProcessor) RegisterUnknownMsg(unknownMessageHandler UnknownMessageJsonHandler) {
jsonProcessor.unknownMessageHandler = unknownMessageHandler
}
func (jsonProcessor *JsonProcessor) RegisterConnected(connectHandler ConnectJsonHandler){
func (jsonProcessor *JsonProcessor) RegisterConnected(connectHandler ConnectJsonHandler) {
jsonProcessor.connectHandler = connectHandler
}
func (jsonProcessor *JsonProcessor) RegisterDisConnected(disconnectHandler ConnectJsonHandler){
func (jsonProcessor *JsonProcessor) RegisterDisConnected(disconnectHandler ConnectJsonHandler) {
jsonProcessor.disconnectHandler = disconnectHandler
}
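
A short sketch of the updated JsonProcessor API with string client ids. LoginReq and message id 1 are illustrative; registered structs must be passed by pointer and carry the typ field the dispatcher switches on.

package example

import (
	"github.com/duanhf2012/origin/v2/log"
	"github.com/duanhf2012/origin/v2/network/processor"
)

type LoginReq struct {
	Type int    `json:"typ"`
	Name string `json:"name"`
}

func newLoginProcessor() *processor.JsonProcessor {
	p := processor.NewJsonProcessor()
	p.Register(1, &LoginReq{}, func(clientId string, msg interface{}) {
		req := msg.(*LoginReq)
		log.Info("login", log.String("clientId", clientId), log.String("name", req.Name))
	})
	p.RegisterUnknownMsg(func(clientId string, msg []byte) {
		log.Warn("unknown message", log.String("clientId", clientId), log.Int("len", len(msg)))
	})
	return p
}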


@@ -3,7 +3,7 @@ package processor
import (
"encoding/binary"
"fmt"
"github.com/duanhf2012/origin/util/bytespool"
"github.com/duanhf2012/origin/v2/util/bytespool"
"google.golang.org/protobuf/proto"
"reflect"
)
@@ -13,9 +13,9 @@ type MessageInfo struct {
msgHandler MessageHandler
}
type MessageHandler func(clientId uint64, msg proto.Message)
type ConnectHandler func(clientId uint64)
type UnknownMessageHandler func(clientId uint64, msg []byte)
type MessageHandler func(clientId string, msg proto.Message)
type ConnectHandler func(clientId string)
type UnknownMessageHandler func(clientId string, msg []byte)
const MsgTypeSize = 2
@@ -26,7 +26,7 @@ type PBProcessor struct {
unknownMessageHandler UnknownMessageHandler
connectHandler ConnectHandler
disconnectHandler ConnectHandler
bytespool.IBytesMempool
bytespool.IBytesMemPool
}
type PBPackInfo struct {
@@ -37,7 +37,7 @@ type PBPackInfo struct {
func NewPBProcessor() *PBProcessor {
processor := &PBProcessor{mapMsg: map[uint16]MessageInfo{}}
processor.IBytesMempool = bytespool.NewMemAreaPool()
processor.IBytesMemPool = bytespool.NewMemAreaPool()
return processor
}
@@ -53,26 +53,27 @@ func (slf *PBPackInfo) GetMsg() proto.Message {
return slf.msg
}
// must goroutine safe
func (pbProcessor *PBProcessor) MsgRoute(clientId uint64, msg interface{}) error {
// MsgRoute must goroutine safe
func (pbProcessor *PBProcessor) MsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) error {
pPackInfo := msg.(*PBPackInfo)
defer recyclerReaderBytes(pPackInfo.rawMsg)
v, ok := pbProcessor.mapMsg[pPackInfo.typ]
if ok == false {
return fmt.Errorf("Cannot find msgtype %d is register!", pPackInfo.typ)
return fmt.Errorf("cannot find msgtype %d is register", pPackInfo.typ)
}
v.msgHandler(clientId, pPackInfo.msg)
return nil
}
// must goroutine safe
func (pbProcessor *PBProcessor) Unmarshal(clientId uint64, data []byte) (interface{}, error) {
defer pbProcessor.ReleaseBytes(data)
// Unmarshal must goroutine safe
func (pbProcessor *PBProcessor) Unmarshal(clientId string, data []byte) (interface{}, error) {
return pbProcessor.UnmarshalWithOutRelease(clientId, data)
}
// unmarshal but not release data
func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId uint64, data []byte) (interface{}, error) {
// UnmarshalWithOutRelease not release data
func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId string, data []byte) (interface{}, error) {
var msgType uint16
if pbProcessor.LittleEndian == true {
msgType = binary.LittleEndian.Uint16(data[:2])
@@ -82,7 +83,7 @@ func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId uint64, data []
info, ok := pbProcessor.mapMsg[msgType]
if ok == false {
return nil, fmt.Errorf("cannot find register %d msgtype!", msgType)
return nil, fmt.Errorf("cannot find register %d msgtype", msgType)
}
msg := reflect.New(info.msgType.Elem()).Interface()
protoMsg := msg.(proto.Message)
@@ -91,11 +92,11 @@ func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId uint64, data []
return nil, err
}
return &PBPackInfo{typ: msgType, msg: protoMsg}, nil
return &PBPackInfo{typ: msgType, msg: protoMsg, rawMsg: data}, nil
}
// must goroutine safe
func (pbProcessor *PBProcessor) Marshal(clientId uint64, msg interface{}) ([]byte, error) {
// Marshal must goroutine safe
func (pbProcessor *PBProcessor) Marshal(clientId string, msg interface{}) ([]byte, error) {
pMsg := msg.(*PBPackInfo)
var err error
@@ -117,12 +118,12 @@ func (pbProcessor *PBProcessor) Marshal(clientId uint64, msg interface{}) ([]byt
return buff, nil
}
func (pbProcessor *PBProcessor) Register(msgtype uint16, msg proto.Message, handle MessageHandler) {
func (pbProcessor *PBProcessor) Register(msgType uint16, msg proto.Message, handle MessageHandler) {
var info MessageInfo
info.msgType = reflect.TypeOf(msg.(proto.Message))
info.msgHandler = handle
pbProcessor.mapMsg[msgtype] = info
pbProcessor.mapMsg[msgType] = info
}
func (pbProcessor *PBProcessor) MakeMsg(msgType uint16, protoMsg proto.Message) *PBPackInfo {
@@ -133,16 +134,16 @@ func (pbProcessor *PBProcessor) MakeRawMsg(msgType uint16, msg []byte) *PBPackIn
return &PBPackInfo{typ: msgType, rawMsg: msg}
}
func (pbProcessor *PBProcessor) UnknownMsgRoute(clientId uint64, msg interface{}) {
func (pbProcessor *PBProcessor) UnknownMsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) {
pbProcessor.unknownMessageHandler(clientId, msg.([]byte))
recyclerReaderBytes(msg.([]byte))
}
// connect event
func (pbProcessor *PBProcessor) ConnectedRoute(clientId uint64) {
func (pbProcessor *PBProcessor) ConnectedRoute(clientId string) {
pbProcessor.connectHandler(clientId)
}
func (pbProcessor *PBProcessor) DisConnectedRoute(clientId uint64) {
func (pbProcessor *PBProcessor) DisConnectedRoute(clientId string) {
pbProcessor.disconnectHandler(clientId)
}


@@ -10,21 +10,22 @@ type RawMessageInfo struct {
msgHandler RawMessageHandler
}
type RawMessageHandler func(clientId uint64,packType uint16,msg []byte)
type RawConnectHandler func(clientId uint64)
type UnknownRawMessageHandler func(clientId uint64,msg []byte)
type RawMessageHandler func(clientId string, packType uint16, msg []byte)
type RawConnectHandler func(clientId string)
type UnknownRawMessageHandler func(clientId string, msg []byte)
const RawMsgTypeSize = 2
type PBRawProcessor struct {
msgHandler RawMessageHandler
LittleEndian bool
msgHandler RawMessageHandler
LittleEndian bool
unknownMessageHandler UnknownRawMessageHandler
connectHandler RawConnectHandler
disconnectHandler RawConnectHandler
connectHandler RawConnectHandler
disconnectHandler RawConnectHandler
}
type PBRawPackInfo struct {
typ uint16
typ uint16
rawMsg []byte
}
@@ -37,74 +38,76 @@ func (pbRawProcessor *PBRawProcessor) SetByteOrder(littleEndian bool) {
pbRawProcessor.LittleEndian = littleEndian
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId uint64, msg interface{}) error{
// MsgRoute must goroutine safe
func (pbRawProcessor *PBRawProcessor) MsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) error {
pPackInfo := msg.(*PBRawPackInfo)
pbRawProcessor.msgHandler(clientId,pPackInfo.typ,pPackInfo.rawMsg)
pbRawProcessor.msgHandler(clientId, pPackInfo.typ, pPackInfo.rawMsg)
recyclerReaderBytes(pPackInfo.rawMsg)
return nil
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId uint64,data []byte) (interface{}, error) {
// Unmarshal must goroutine safe
func (pbRawProcessor *PBRawProcessor) Unmarshal(clientId string, data []byte) (interface{}, error) {
var msgType uint16
if pbRawProcessor.LittleEndian == true {
msgType = binary.LittleEndian.Uint16(data[:2])
}else{
} else {
msgType = binary.BigEndian.Uint16(data[:2])
}
return &PBRawPackInfo{typ:msgType,rawMsg:data},nil
return &PBRawPackInfo{typ: msgType, rawMsg: data}, nil
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) Marshal(clientId uint64,msg interface{}) ([]byte, error){
// Marshal must goroutine safe
func (pbRawProcessor *PBRawProcessor) Marshal(clientId string, msg interface{}) ([]byte, error) {
pMsg := msg.(*PBRawPackInfo)
buff := make([]byte, 2, len(pMsg.rawMsg)+RawMsgTypeSize)
if pbRawProcessor.LittleEndian == true {
binary.LittleEndian.PutUint16(buff[:2],pMsg.typ)
}else{
binary.BigEndian.PutUint16(buff[:2],pMsg.typ)
binary.LittleEndian.PutUint16(buff[:2], pMsg.typ)
} else {
binary.BigEndian.PutUint16(buff[:2], pMsg.typ)
}
buff = append(buff,pMsg.rawMsg...)
return buff,nil
buff = append(buff, pMsg.rawMsg...)
return buff, nil
}
func (pbRawProcessor *PBRawProcessor) SetRawMsgHandler(handle RawMessageHandler) {
func (pbRawProcessor *PBRawProcessor) SetRawMsgHandler(handle RawMessageHandler) {
pbRawProcessor.msgHandler = handle
}
func (pbRawProcessor *PBRawProcessor) MakeRawMsg(msgType uint16,msg []byte,pbRawPackInfo *PBRawPackInfo) {
func (pbRawProcessor *PBRawProcessor) MakeRawMsg(msgType uint16, msg []byte, pbRawPackInfo *PBRawPackInfo) {
pbRawPackInfo.typ = msgType
pbRawPackInfo.rawMsg = msg
}
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId uint64,msg interface{}){
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) {
defer recyclerReaderBytes(msg.([]byte))
if pbRawProcessor.unknownMessageHandler == nil {
return
}
pbRawProcessor.unknownMessageHandler(clientId,msg.([]byte))
pbRawProcessor.unknownMessageHandler(clientId, msg.([]byte))
}
// connect event
func (pbRawProcessor *PBRawProcessor) ConnectedRoute(clientId uint64){
func (pbRawProcessor *PBRawProcessor) ConnectedRoute(clientId string) {
pbRawProcessor.connectHandler(clientId)
}
func (pbRawProcessor *PBRawProcessor) DisConnectedRoute(clientId uint64){
func (pbRawProcessor *PBRawProcessor) DisConnectedRoute(clientId string) {
pbRawProcessor.disconnectHandler(clientId)
}
func (pbRawProcessor *PBRawProcessor) SetUnknownMsgHandler(unknownMessageHandler UnknownRawMessageHandler){
func (pbRawProcessor *PBRawProcessor) SetUnknownMsgHandler(unknownMessageHandler UnknownRawMessageHandler) {
pbRawProcessor.unknownMessageHandler = unknownMessageHandler
}
func (pbRawProcessor *PBRawProcessor) SetConnectedHandler(connectHandler RawConnectHandler){
func (pbRawProcessor *PBRawProcessor) SetConnectedHandler(connectHandler RawConnectHandler) {
pbRawProcessor.connectHandler = connectHandler
}
func (pbRawProcessor *PBRawProcessor) SetDisConnectedHandler(disconnectHandler RawConnectHandler){
func (pbRawProcessor *PBRawProcessor) SetDisConnectedHandler(disconnectHandler RawConnectHandler) {
pbRawProcessor.disconnectHandler = disconnectHandler
}
@@ -116,7 +119,7 @@ func (slf *PBRawPackInfo) GetMsg() []byte {
return slf.rawMsg
}
func (slf *PBRawPackInfo) SetPackInfo(typ uint16,rawMsg []byte){
func (slf *PBRawPackInfo) SetPackInfo(typ uint16, rawMsg []byte) {
slf.typ = typ
slf.rawMsg = rawMsg
}
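
A brief sketch of the raw processor with the new string clientId handlers; the handler body is a placeholder.

package example

import "github.com/duanhf2012/origin/v2/network/processor"

func newRawProcessor() *processor.PBRawProcessor {
	p := &processor.PBRawProcessor{}
	p.SetByteOrder(true) // little-endian packType header
	p.SetRawMsgHandler(func(clientId string, packType uint16, msg []byte) {
		// msg is the payload after the 2-byte packType header; MsgRoute recycles
		// it right after this handler returns, so copy it if you need to keep it
	})
	return p
}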


@@ -1,19 +1,18 @@
package processor
type IProcessor interface {
// must goroutine safe
MsgRoute(clientId uint64,msg interface{}) error
//must goroutine safe
UnknownMsgRoute(clientId uint64,msg interface{})
// connect event
ConnectedRoute(clientId uint64)
DisConnectedRoute(clientId uint64)
// MsgRoute must goroutine safe
MsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) error
// UnknownMsgRoute must goroutine safe
UnknownMsgRoute(clientId string, msg interface{}, recyclerReaderBytes func(data []byte))
// ConnectedRoute connect event
ConnectedRoute(clientId string)
DisConnectedRoute(clientId string)
// must goroutine safe
Unmarshal(clientId uint64,data []byte) (interface{}, error)
// must goroutine safe
Marshal(clientId uint64,msg interface{}) ([]byte, error)
// Unmarshal must goroutine safe
Unmarshal(clientId string, data []byte) (interface{}, error)
// Marshal must goroutine safe
Marshal(clientId string, msg interface{}) ([]byte, error)
}
type IRawProcessor interface {
@@ -21,9 +20,8 @@ type IRawProcessor interface {
SetByteOrder(littleEndian bool)
SetRawMsgHandler(handle RawMessageHandler)
MakeRawMsg(msgType uint16,msg []byte,pbRawPackInfo *PBRawPackInfo)
MakeRawMsg(msgType uint16, msg []byte, pbRawPackInfo *PBRawPackInfo)
SetUnknownMsgHandler(unknownMessageHandler UnknownRawMessageHandler)
SetConnectedHandler(connectHandler RawConnectHandler)
SetDisConnectedHandler(disconnectHandler RawConnectHandler)
}
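
The recyclerReaderBytes callback added to MsgRoute/UnknownMsgRoute is the pool-release hook produced by MsgParser.GetRecyclerReaderBytes; an IProcessor implementation is expected to call it once the raw buffer is no longer needed. A sketch of that contract (names are illustrative):

package example

// routeFrame shows the intended lifetime of the pooled read buffer.
func routeFrame(clientId string, msg interface{}, recyclerReaderBytes func(data []byte)) error {
	raw := msg.([]byte)
	defer recyclerReaderBytes(raw) // hand the buffer back to the bytespool when done
	// ... dispatch raw to the registered handler ...
	return nil
}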


@@ -1,7 +1,7 @@
package network
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"net"
"sync"
"time"
@@ -14,9 +14,9 @@ type TCPClient struct {
ConnectInterval time.Duration
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
WriteDeadline time.Duration
AutoReconnect bool
NewAgent func(*TCPConn) Agent
NewAgent func(conn *NetConn) Agent
cons ConnSet
wg sync.WaitGroup
closeFlag bool
@@ -40,23 +40,23 @@ func (client *TCPClient) init() {
if client.ConnNum <= 0 {
client.ConnNum = 1
log.Info("invalid ConnNum",log.Int("reset", client.ConnNum))
log.Info("invalid ConnNum", log.Int("reset", client.ConnNum))
}
if client.ConnectInterval <= 0 {
client.ConnectInterval = 3 * time.Second
log.Info("invalid ConnectInterval",log.Duration("reset", client.ConnectInterval))
log.Info("invalid ConnectInterval", log.Duration("reset", client.ConnectInterval))
}
if client.PendingWriteNum <= 0 {
client.PendingWriteNum = 1000
log.Info("invalid PendingWriteNum",log.Int("reset",client.PendingWriteNum))
log.Info("invalid PendingWriteNum", log.Int("reset", client.PendingWriteNum))
}
if client.ReadDeadline == 0 {
client.ReadDeadline = 15*time.Second
log.Info("invalid ReadDeadline",log.Int64("reset", int64(client.ReadDeadline.Seconds())))
client.ReadDeadline = 15 * time.Second
log.Info("invalid ReadDeadline", log.Int64("reset", int64(client.ReadDeadline.Seconds())))
}
if client.WriteDeadline == 0 {
client.WriteDeadline = 15*time.Second
log.Info("invalid WriteDeadline",log.Int64("reset", int64(client.WriteDeadline.Seconds())))
client.WriteDeadline = 15 * time.Second
log.Info("invalid WriteDeadline", log.Int64("reset", int64(client.WriteDeadline.Seconds())))
}
if client.NewAgent == nil {
log.Fatal("NewAgent must not be nil")
@@ -71,21 +71,21 @@ func (client *TCPClient) init() {
if client.MaxMsgLen == 0 {
client.MaxMsgLen = Default_MaxMsgLen
}
if client.LenMsgLen ==0 {
if client.LenMsgLen == 0 {
client.LenMsgLen = Default_LenMsgLen
}
maxMsgLen := client.MsgParser.getMaxMsgLen(client.LenMsgLen)
maxMsgLen := client.MsgParser.getMaxMsgLen()
if client.MaxMsgLen > maxMsgLen {
client.MaxMsgLen = maxMsgLen
log.Info("invalid MaxMsgLen",log.Uint32("reset", maxMsgLen))
log.Info("invalid MaxMsgLen", log.Uint32("reset", maxMsgLen))
}
client.cons = make(ConnSet)
client.closeFlag = false
client.MsgParser.init()
client.MsgParser.Init()
}
func (client *TCPClient) GetCloseFlag() bool{
func (client *TCPClient) GetCloseFlag() bool {
client.Lock()
defer client.Unlock()
@@ -102,7 +102,7 @@ func (client *TCPClient) dial() net.Conn {
return conn
}
log.Warning("connect error ",log.String("error",err.Error()), log.String("Addr",client.Addr))
log.Warn("connect error ", log.String("error", err.Error()), log.String("Addr", client.Addr))
time.Sleep(client.ConnectInterval)
continue
}
@@ -116,7 +116,7 @@ reconnect:
if conn == nil {
return
}
client.Lock()
if client.closeFlag {
client.Unlock()
@@ -126,7 +126,7 @@ reconnect:
client.cons[conn] = struct{}{}
client.Unlock()
tcpConn := newTCPConn(conn, client.PendingWriteNum, &client.MsgParser,client.WriteDeadline)
tcpConn := newNetConn(conn, client.PendingWriteNum, &client.MsgParser, client.WriteDeadline)
agent := client.NewAgent(tcpConn)
agent.Run()
@@ -152,8 +152,7 @@ func (client *TCPClient) Close(waitDone bool) {
client.cons = nil
client.Unlock()
if waitDone == true{
if waitDone == true {
client.wg.Wait()
}
}


@@ -1,162 +0,0 @@
package network
import (
"errors"
"github.com/duanhf2012/origin/log"
"net"
"sync"
"sync/atomic"
"time"
)
type ConnSet map[net.Conn]struct{}
type TCPConn struct {
sync.Mutex
conn net.Conn
writeChan chan []byte
closeFlag int32
msgParser *MsgParser
}
func freeChannel(conn *TCPConn){
for;len(conn.writeChan)>0;{
byteBuff := <- conn.writeChan
if byteBuff != nil {
conn.ReleaseReadMsg(byteBuff)
}
}
}
func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser,writeDeadline time.Duration) *TCPConn {
tcpConn := new(TCPConn)
tcpConn.conn = conn
tcpConn.writeChan = make(chan []byte, pendingWriteNum)
tcpConn.msgParser = msgParser
go func() {
for b := range tcpConn.writeChan {
if b == nil {
break
}
conn.SetWriteDeadline(time.Now().Add(writeDeadline))
_, err := conn.Write(b)
tcpConn.msgParser.ReleaseBytes(b)
if err != nil {
break
}
}
conn.Close()
tcpConn.Lock()
freeChannel(tcpConn)
atomic.StoreInt32(&tcpConn.closeFlag,1)
tcpConn.Unlock()
}()
return tcpConn
}
func (tcpConn *TCPConn) doDestroy() {
tcpConn.conn.(*net.TCPConn).SetLinger(0)
tcpConn.conn.Close()
if atomic.LoadInt32(&tcpConn.closeFlag)==0 {
close(tcpConn.writeChan)
atomic.StoreInt32(&tcpConn.closeFlag,1)
}
}
func (tcpConn *TCPConn) Destroy() {
tcpConn.Lock()
defer tcpConn.Unlock()
tcpConn.doDestroy()
}
func (tcpConn *TCPConn) Close() {
tcpConn.Lock()
defer tcpConn.Unlock()
if atomic.LoadInt32(&tcpConn.closeFlag)==1 {
return
}
tcpConn.doWrite(nil)
atomic.StoreInt32(&tcpConn.closeFlag,1)
}
func (tcpConn *TCPConn) GetRemoteIp() string {
return tcpConn.conn.RemoteAddr().String()
}
func (tcpConn *TCPConn) doWrite(b []byte) error{
if len(tcpConn.writeChan) == cap(tcpConn.writeChan) {
tcpConn.ReleaseReadMsg(b)
log.Error("close conn: channel full")
tcpConn.doDestroy()
return errors.New("close conn: channel full")
}
tcpConn.writeChan <- b
return nil
}
// b must not be modified by the others goroutines
func (tcpConn *TCPConn) Write(b []byte) error{
tcpConn.Lock()
defer tcpConn.Unlock()
if atomic.LoadInt32(&tcpConn.closeFlag)==1 || b == nil {
tcpConn.ReleaseReadMsg(b)
return errors.New("conn is close")
}
return tcpConn.doWrite(b)
}
func (tcpConn *TCPConn) Read(b []byte) (int, error) {
return tcpConn.conn.Read(b)
}
func (tcpConn *TCPConn) LocalAddr() net.Addr {
return tcpConn.conn.LocalAddr()
}
func (tcpConn *TCPConn) RemoteAddr() net.Addr {
return tcpConn.conn.RemoteAddr()
}
func (tcpConn *TCPConn) ReadMsg() ([]byte, error) {
return tcpConn.msgParser.Read(tcpConn)
}
func (tcpConn *TCPConn) ReleaseReadMsg(byteBuff []byte){
tcpConn.msgParser.ReleaseBytes(byteBuff)
}
func (tcpConn *TCPConn) WriteMsg(args ...[]byte) error {
if atomic.LoadInt32(&tcpConn.closeFlag) == 1 {
return errors.New("conn is close")
}
return tcpConn.msgParser.Write(tcpConn, args...)
}
func (tcpConn *TCPConn) WriteRawMsg(args []byte) error {
if atomic.LoadInt32(&tcpConn.closeFlag) == 1 {
return errors.New("conn is close")
}
return tcpConn.Write(args)
}
func (tcpConn *TCPConn) IsConnected() bool {
return atomic.LoadInt32(&tcpConn.closeFlag) == 0
}
func (tcpConn *TCPConn) SetReadDeadline(d time.Duration) {
tcpConn.conn.SetReadDeadline(time.Now().Add(d))
}
func (tcpConn *TCPConn) SetWriteDeadline(d time.Duration) {
tcpConn.conn.SetWriteDeadline(time.Now().Add(d))
}


@@ -3,12 +3,12 @@ package network
import (
"encoding/binary"
"errors"
"github.com/duanhf2012/origin/util/bytespool"
"github.com/duanhf2012/origin/v2/util/bytespool"
"io"
"math"
)
// --------------
// MsgParser --------------
// | len | data |
// --------------
type MsgParser struct {
@@ -17,11 +17,10 @@ type MsgParser struct {
MaxMsgLen uint32
LittleEndian bool
bytespool.IBytesMempool
bytespool.IBytesMemPool
}
func (p *MsgParser) getMaxMsgLen(lenMsgLen int) uint32 {
func (p *MsgParser) getMaxMsgLen() uint32 {
switch p.LenMsgLen {
case 1:
return math.MaxUint8
@@ -34,17 +33,17 @@ func (p *MsgParser) getMaxMsgLen(lenMsgLen int) uint32 {
}
}
func (p *MsgParser) init(){
p.IBytesMempool = bytespool.NewMemAreaPool()
func (p *MsgParser) Init() {
p.IBytesMemPool = bytespool.NewMemAreaPool()
}
// goroutine safe
func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {
func (p *MsgParser) Read(r io.Reader) ([]byte, error) {
var b [4]byte
bufMsgLen := b[:p.LenMsgLen]
// read len
if _, err := io.ReadFull(conn, bufMsgLen); err != nil {
if _, err := io.ReadFull(r, bufMsgLen); err != nil {
return nil, err
}
@@ -73,10 +72,10 @@ func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {
} else if msgLen < p.MinMsgLen {
return nil, errors.New("message too short")
}
// data
msgData := p.MakeBytes(int(msgLen))
if _, err := io.ReadFull(conn, msgData[:msgLen]); err != nil {
if _, err := io.ReadFull(r, msgData[:msgLen]); err != nil {
p.ReleaseBytes(msgData)
return nil, err
}
@@ -85,7 +84,7 @@ func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {
}
// goroutine safe
func (p *MsgParser) Write(conn *TCPConn, args ...[]byte) error {
func (p *MsgParser) Write(conn io.Writer, args ...[]byte) error {
// get len
var msgLen uint32
for i := 0; i < len(args); i++ {
@@ -100,7 +99,7 @@ func (p *MsgParser) Write(conn *TCPConn, args ...[]byte) error {
}
//msg := make([]byte, uint32(p.lenMsgLen)+msgLen)
msg := p.MakeBytes(p.LenMsgLen+int(msgLen))
msg := p.MakeBytes(p.LenMsgLen + int(msgLen))
// write len
switch p.LenMsgLen {
case 1:
@@ -130,3 +129,9 @@ func (p *MsgParser) Write(conn *TCPConn, args ...[]byte) error {
return nil
}
func (p *MsgParser) GetRecyclerReaderBytes() func(data []byte) {
return func(data []byte) {
p.IBytesMemPool.ReleaseBytes(data)
}
}
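
Because Read and Write now operate on plain io.Reader/io.Writer, the | len | data | framing can be exercised in isolation. A minimal sketch, assuming the default 2-byte big-endian length header:

package main

import (
	"bytes"
	"fmt"

	"github.com/duanhf2012/origin/v2/network"
)

func main() {
	p := &network.MsgParser{LenMsgLen: 2, MinMsgLen: 2, MaxMsgLen: 65535}
	p.Init() // attaches the bytespool used by MakeBytes/ReleaseBytes

	var buf bytes.Buffer
	if err := p.Write(&buf, []byte("ping")); err != nil {
		panic(err)
	}
	// 2-byte big-endian length (00 04) followed by the payload bytes
	fmt.Printf("% x\n", buf.Bytes())

	msg, err := p.Read(&buf) // strips the header, returns a pooled buffer
	if err == nil {
		p.ReleaseBytes(msg) // or hand GetRecyclerReaderBytes() to the consumer
	}
}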


@@ -1,22 +1,23 @@
package network
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/util/bytespool"
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/bytespool"
"net"
"sync"
"time"
)
const(
Default_ReadDeadline = time.Second*30 //default read timeout, 30s
Default_WriteDeadline = time.Second*30 //default write timeout, 30s
Default_MaxConnNum = 1000000 //default maximum number of connections
Default_PendingWriteNum = 100000 //per-connection write channel capacity
Default_LittleEndian = false //default byte order
Default_MinMsgLen = 2 //minimum message length, 2 bytes
Default_LenMsgLen = 2 //length header occupies 2 bytes
Default_MaxMsgLen = 65535 //maximum message length
const (
Default_ReadDeadline = time.Second * 30 //default read timeout, 30s
Default_WriteDeadline = time.Second * 30 //default write timeout, 30s
Default_MaxConnNum = 1000000 //default maximum number of connections
Default_PendingWriteNum = 100000 //per-connection write channel capacity
Default_MinMsgLen = 2 //minimum message length, 2 bytes
Default_LenMsgLen = 2 //length header occupies 2 bytes
Default_MaxMsgLen = 65535 //maximum message length
)
type TCPServer struct {
@@ -24,37 +25,44 @@ type TCPServer struct {
MaxConnNum int
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
WriteDeadline time.Duration
NewAgent func(*TCPConn) Agent
ln net.Listener
conns ConnSet
mutexConns sync.Mutex
wgLn sync.WaitGroup
wgConns sync.WaitGroup
NewAgent func(conn Conn) Agent
ln net.Listener
conns ConnSet
mutexConns sync.Mutex
wgLn sync.WaitGroup
wgConns sync.WaitGroup
MsgParser
}
func (server *TCPServer) Start() {
server.init()
func (server *TCPServer) Start() error {
err := server.init()
if err != nil {
return err
}
server.wgLn.Add(1)
go server.run()
return nil
}
func (server *TCPServer) init() {
func (server *TCPServer) init() error {
ln, err := net.Listen("tcp", server.Addr)
if err != nil {
log.Fatal("Listen tcp fail",log.String("error", err.Error()))
return fmt.Errorf("listen tcp fail,error:%s", err.Error())
}
if server.MaxConnNum <= 0 {
server.MaxConnNum = Default_MaxConnNum
log.Info("invalid MaxConnNum",log.Int("reset", server.MaxConnNum))
log.Info("invalid MaxConnNum", log.Int("reset", server.MaxConnNum))
}
if server.PendingWriteNum <= 0 {
server.PendingWriteNum = Default_PendingWriteNum
log.Info("invalid PendingWriteNum",log.Int("reset", server.PendingWriteNum))
log.Info("invalid PendingWriteNum", log.Int("reset", server.PendingWriteNum))
}
if server.LenMsgLen <= 0 {
@@ -67,68 +75,70 @@ func (server *TCPServer) init() {
log.Info("invalid MaxMsgLen", log.Uint32("reset to", server.MaxMsgLen))
}
maxMsgLen := server.MsgParser.getMaxMsgLen(server.LenMsgLen)
maxMsgLen := server.MsgParser.getMaxMsgLen()
if server.MaxMsgLen > maxMsgLen {
server.MaxMsgLen = maxMsgLen
log.Info("invalid MaxMsgLen",log.Uint32("reset", maxMsgLen))
log.Info("invalid MaxMsgLen", log.Uint32("reset", maxMsgLen))
}
if server.MinMsgLen <= 0 {
server.MinMsgLen = Default_MinMsgLen
log.Info("invalid MinMsgLen",log.Uint32("reset", server.MinMsgLen))
log.Info("invalid MinMsgLen", log.Uint32("reset", server.MinMsgLen))
}
if server.WriteDeadline == 0 {
server.WriteDeadline = Default_WriteDeadline
log.Info("invalid WriteDeadline",log.Int64("reset",int64(server.WriteDeadline.Seconds())))
log.Info("invalid WriteDeadline", log.Int64("reset", int64(server.WriteDeadline.Seconds())))
}
if server.ReadDeadline == 0 {
server.ReadDeadline = Default_ReadDeadline
log.Info("invalid ReadDeadline",log.Int64("reset", int64(server.ReadDeadline.Seconds())))
log.Info("invalid ReadDeadline", log.Int64("reset", int64(server.ReadDeadline.Seconds())))
}
if server.NewAgent == nil {
log.Fatal("NewAgent must not be nil")
return errors.New("NewAgent must not be nil")
}
server.ln = ln
server.conns = make(ConnSet)
server.MsgParser.init()
server.conns = make(ConnSet, 2048)
server.MsgParser.Init()
return nil
}
func (server *TCPServer) SetNetMempool(mempool bytespool.IBytesMempool){
server.IBytesMempool = mempool
func (server *TCPServer) SetNetMemPool(memPool bytespool.IBytesMemPool) {
server.IBytesMemPool = memPool
}
func (server *TCPServer) GetNetMempool() bytespool.IBytesMempool {
return server.IBytesMempool
func (server *TCPServer) GetNetMemPool() bytespool.IBytesMemPool {
return server.IBytesMemPool
}
func (server *TCPServer) run() {
server.wgLn.Add(1)
defer server.wgLn.Done()
var tempDelay time.Duration
for {
conn, err := server.ln.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
var ne net.Error
if errors.As(err, &ne) && ne.Timeout() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
log.Info("accept fail",log.String("error",err.Error()),log.Duration("sleep time", tempDelay))
log.Info("accept fail", log.String("error", err.Error()), log.Duration("sleep time", tempDelay))
tempDelay = min(1*time.Second, tempDelay)
time.Sleep(tempDelay)
continue
}
return
}
conn.(*net.TCPConn).SetLinger(0)
conn.(*net.TCPConn).SetNoDelay(true)
tempDelay = 0
@@ -136,7 +146,7 @@ func (server *TCPServer) run() {
if len(server.conns) >= server.MaxConnNum {
server.mutexConns.Unlock()
conn.Close()
log.Warning("too many connections")
log.Warn("too many connections")
continue
}
@@ -144,7 +154,7 @@ func (server *TCPServer) run() {
server.mutexConns.Unlock()
server.wgConns.Add(1)
tcpConn := newTCPConn(conn, server.PendingWriteNum, &server.MsgParser,server.WriteDeadline)
tcpConn := newNetConn(conn, server.PendingWriteNum, &server.MsgParser, server.WriteDeadline)
agent := server.NewAgent(tcpConn)
go func() {

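TCPServer.Start now reports listen and configuration failures to the caller instead of calling log.Fatal inside init. A minimal sketch, assuming Conn exposes ReadMsg/WriteMsg the way NetConn does and Agent is the usual Run/OnClose pair; unset parser fields fall back to the Default_* constants:

package main

import (
	"github.com/duanhf2012/origin/v2/log"
	"github.com/duanhf2012/origin/v2/network"
)

type echoAgent struct{ conn network.Conn }

func (a *echoAgent) Run() {
	for {
		data, err := a.conn.ReadMsg()
		if err != nil {
			return
		}
		a.conn.WriteMsg(data)       // echo back (WriteMsg copies into its own buffer)
		a.conn.ReleaseReadMsg(data) // return the pooled read buffer
	}
}

func (a *echoAgent) OnClose() {}

func main() {
	server := &network.TCPServer{
		Addr:     "0.0.0.0:9000",
		NewAgent: func(conn network.Conn) network.Agent { return &echoAgent{conn: conn} },
	}
	if err := server.Start(); err != nil { // no more log.Fatal inside init()
		log.Fatal("start tcp server failed", log.ErrorField("error", err))
	}
	select {}
}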

@@ -1,7 +1,7 @@
package network
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/gorilla/websocket"
"sync"
"time"
@@ -108,7 +108,7 @@ reconnect:
client.cons[conn] = struct{}{}
client.Unlock()
wsConn := newWSConn(conn, client.PendingWriteNum, client.MaxMsgLen,client.MessageType)
wsConn := newWSConn(conn,nil, client.PendingWriteNum, client.MaxMsgLen,client.MessageType)
agent := client.NewAgent(wsConn)
agent.Run()


@@ -2,9 +2,10 @@ package network
import (
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/gorilla/websocket"
"net"
"net/http"
"sync"
)
@@ -16,13 +17,15 @@ type WSConn struct {
writeChan chan []byte
maxMsgLen uint32
closeFlag bool
header http.Header
}
func newWSConn(conn *websocket.Conn, pendingWriteNum int, maxMsgLen uint32,messageType int) *WSConn {
func newWSConn(conn *websocket.Conn, header http.Header, pendingWriteNum int, maxMsgLen uint32, messageType int) *WSConn {
wsConn := new(WSConn)
wsConn.conn = conn
wsConn.writeChan = make(chan []byte, pendingWriteNum)
wsConn.maxMsgLen = maxMsgLen
wsConn.header = header
go func() {
for b := range wsConn.writeChan {
@@ -46,7 +49,6 @@ func newWSConn(conn *websocket.Conn, pendingWriteNum int, maxMsgLen uint32,messa
}
func (wsConn *WSConn) doDestroy() {
wsConn.conn.UnderlyingConn().(*net.TCPConn).SetLinger(0)
wsConn.conn.Close()
if !wsConn.closeFlag {
@@ -83,6 +85,10 @@ func (wsConn *WSConn) doWrite(b []byte) {
wsConn.writeChan <- b
}
func (wsConn *WSConn) GetHeader() http.Header {
return wsConn.header
}
func (wsConn *WSConn) LocalAddr() net.Addr {
return wsConn.conn.LocalAddr()
}
@@ -91,13 +97,13 @@ func (wsConn *WSConn) RemoteAddr() net.Addr {
return wsConn.conn.RemoteAddr()
}
// goroutine not safe
// ReadMsg goroutine not safe
func (wsConn *WSConn) ReadMsg() ([]byte, error) {
_, b, err := wsConn.conn.ReadMessage()
return b, err
}
// args must not be modified by the others goroutines
// WriteMsg args must not be modified by the others goroutines
func (wsConn *WSConn) WriteMsg(args ...[]byte) error {
wsConn.Lock()
defer wsConn.Unlock()


@@ -2,7 +2,8 @@ package network
import (
"crypto/tls"
"github.com/duanhf2012/origin/log"
"errors"
"github.com/duanhf2012/origin/v2/log"
"github.com/gorilla/websocket"
"net"
"net/http"
@@ -47,7 +48,7 @@ func (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
conn, err := handler.upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error("upgrade fail",log.String("error",err.Error()))
log.Error("upgrade fail", log.String("error", err.Error()))
return
}
conn.SetReadLimit(int64(handler.maxMsgLen))
@@ -67,13 +68,15 @@ func (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if len(handler.conns) >= handler.maxConnNum {
handler.mutexConns.Unlock()
conn.Close()
log.Warning("too many connections")
log.Warn("too many connections")
return
}
handler.conns[conn] = struct{}{}
handler.mutexConns.Unlock()
wsConn := newWSConn(conn, handler.pendingWriteNum, handler.maxMsgLen, handler.messageType)
conn.UnderlyingConn().(*net.TCPConn).SetLinger(0)
conn.UnderlyingConn().(*net.TCPConn).SetNoDelay(true)
wsConn := newWSConn(conn, r.Header, handler.pendingWriteNum, handler.maxMsgLen, handler.messageType)
agent := handler.newAgent(wsConn)
agent.Run()
@@ -92,10 +95,11 @@ func (server *WSServer) SetMessageType(messageType int) {
}
}
func (server *WSServer) Start() {
func (server *WSServer) Start() error {
ln, err := net.Listen("tcp", server.Addr)
if err != nil {
log.Fatal("WSServer Listen fail",log.String("error", err.Error()))
log.Error("WSServer Listen fail", log.String("error", err.Error()))
return err
}
if server.MaxConnNum <= 0 {
@@ -115,18 +119,19 @@ func (server *WSServer) Start() {
log.Info("invalid HTTPTimeout", log.Duration("reset", server.HTTPTimeout))
}
if server.NewAgent == nil {
log.Fatal("NewAgent must not be nil")
log.Error("NewAgent must not be nil")
return errors.New("NewAgent must not be nil")
}
if server.CertFile != "" || server.KeyFile != "" {
config := &tls.Config{}
config.NextProtos = []string{"http/1.1"}
var err error
config.Certificates = make([]tls.Certificate, 1)
config.Certificates[0], err = tls.LoadX509KeyPair(server.CertFile, server.KeyFile)
if err != nil {
log.Fatal("LoadX509KeyPair fail",log.String("error", err.Error()))
log.Error("LoadX509KeyPair fail", log.String("error", err.Error()))
return err
}
ln = tls.NewListener(ln, config)
@@ -139,7 +144,7 @@ func (server *WSServer) Start() {
maxMsgLen: server.MaxMsgLen,
newAgent: server.NewAgent,
conns: make(WebsocketConnSet),
messageType:server.messageType,
messageType: server.messageType,
upgrader: websocket.Upgrader{
HandshakeTimeout: server.HTTPTimeout,
CheckOrigin: func(_ *http.Request) bool { return true },
@@ -155,6 +160,7 @@ func (server *WSServer) Start() {
}
go httpServer.Serve(ln)
return nil
}
func (server *WSServer) Close() {


@@ -3,13 +3,15 @@ package node
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/cluster"
"github.com/duanhf2012/origin/console"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/profiler"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/util/buildtime"
"github.com/duanhf2012/origin/util/timer"
"github.com/duanhf2012/origin/v2/cluster"
"github.com/duanhf2012/origin/v2/console"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/profiler"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/buildtime"
"github.com/duanhf2012/origin/v2/util/sysprocess"
"github.com/duanhf2012/origin/v2/util/timer"
"go.uber.org/zap/zapcore"
"io"
"net/http"
_ "net/http/pprof"
@@ -22,38 +24,48 @@ import (
)
var sig chan os.Signal
var nodeId int
var nodeId string
var preSetupService []service.IService //preinstalled services
var preSetupTemplateService []func() service.IService
var profilerInterval time.Duration
var bValid bool
var configDir = "./config/"
var NodeIsRun = false
const (
SingleStop syscall.Signal = 10
SignalRetire syscall.Signal = 12
)
type BuildOSType = int8
const(
const (
Windows BuildOSType = 0
Linux BuildOSType = 1
Mac BuildOSType = 2
Linux BuildOSType = 1
Mac BuildOSType = 2
)
func init() {
sig = make(chan os.Signal, 3)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM, syscall.Signal(10))
sig = make(chan os.Signal, 4)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM, SingleStop, SignalRetire)
console.RegisterCommandBool("help", false, "<-help> This help.", usage)
console.RegisterCommandString("name", "", "<-name nodeName> Node's name.", setName)
console.RegisterCommandString("start", "", "<-start nodeid=nodeid> Run originserver.", startNode)
console.RegisterCommandString("stop", "", "<-stop nodeid=nodeid> Stop originserver process.", stopNode)
console.RegisterCommandString("retire", "", "<-retire nodeid=nodeid> retire originserver process.", retireNode)
console.RegisterCommandString("config", "", "<-config path> Configuration file path.", setConfigPath)
console.RegisterCommandString("console", "", "<-console true|false> Turn on or off screen log output.", openConsole)
console.RegisterCommandString("loglevel", "debug", "<-loglevel debug|release|warning|error|fatal> Set loglevel.", setLevel)
console.RegisterCommandString("loglevel", "debug", "<-loglevel debug|info|warn|error|stackerror|fatal> Set loglevel.", setLevel)
console.RegisterCommandString("logpath", "", "<-logpath path> Set log file path.", setLogPath)
console.RegisterCommandInt("logsize", 0, "<-logsize size> Set log size(MB).", setLogSize)
console.RegisterCommandInt("logchannelcap", 0, "<-logchannelcap num> Set log channel cap.", setLogChannelCapNum)
console.RegisterCommandInt("logchanlen", 0, "<-logchanlen len> Set log channel len.", setLogChanLen)
console.RegisterCommandString("pprof", "", "<-pprof ip:port> Open performance analysis.", setPprof)
}
func notifyAllServiceRetire() {
service.NotifyAllServiceRetire()
}
func usage(val interface{}) error {
ret := val.(bool)
if ret == false {
@@ -70,7 +82,7 @@ func usage(val interface{}) error {
return nil
}
func setName(val interface{}) error {
func setName(_ interface{}) error {
return nil
}
@@ -97,7 +109,7 @@ func setConfigPath(val interface{}) error {
}
_, err := os.Stat(configPath)
if err != nil {
return fmt.Errorf("Cannot find file path %s", configPath)
return fmt.Errorf("cannot find file path %s", configPath)
}
cluster.SetConfigDir(configPath)
@@ -105,8 +117,8 @@ func setConfigPath(val interface{}) error {
return nil
}
func getRunProcessPid(nodeId int) (int, error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
func getRunProcessPid(nodeId string) (int, error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%s.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
defer f.Close()
if err != nil {
return 0, err
@@ -120,9 +132,9 @@ func getRunProcessPid(nodeId int) (int, error) {
return strconv.Atoi(string(pidByte))
}
func writeProcessPid(nodeId int) {
func writeProcessPid(nodeId string) {
//pid
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid", os.Args[0], nodeId), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
f, err := os.OpenFile(fmt.Sprintf("%s_%s.pid", os.Args[0], nodeId), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
defer f.Close()
if err != nil {
fmt.Println(err.Error())
@@ -136,27 +148,55 @@ func writeProcessPid(nodeId int) {
}
}
func GetNodeId() int {
func GetNodeId() string {
return nodeId
}
func initNode(id int) {
func initNode(id string) {
//1. Initialize the cluster
nodeId = id
err := cluster.GetCluster().Init(GetNodeId(), Setup)
if err != nil {
log.Fatal("read system config is error ",log.ErrorAttr("error",err))
log.Error("Init cluster fail", log.ErrorField("error", err))
os.Exit(1)
}
err = initLog()
if err != nil {
log.Error("Init log fail", log.ErrorField("error", err))
os.Exit(1)
return
}
//2. install services in the configured order
serviceOrder := cluster.GetCluster().GetLocalNodeInfo().ServiceList
for _,serviceName:= range serviceOrder{
for _, serviceName := range serviceOrder {
bSetup := false
//check whether this entry is configured with a template service
splitServiceName := strings.Split(serviceName, ":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
for _, newSer := range preSetupTemplateService {
ser := newSer()
ser.OnSetup(ser)
if ser.GetName() == templateServiceName {
ser.SetName(serviceName)
ser.Init(ser, cluster.GetRpcClient, cluster.GetRpcServer, cluster.GetCluster().GetServiceCfg(ser.GetName()))
service.Setup(ser)
bSetup = true
break
}
}
if bSetup == false {
log.Error("Template service not found", log.String("service name", serviceName), log.String("template service name", templateServiceName))
os.Exit(1)
}
}
for _, s := range preSetupService {
if s.GetName() != serviceName {
continue
@@ -169,27 +209,29 @@ func initNode(id int) {
}
if bSetup == false {
log.Fatal("Service name "+serviceName+" configuration error")
log.Fatal("Service name " + serviceName + " configuration error")
}
}
//3. initialize services
log.Info("Start running server.")
service.Init()
}
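The "ServiceName:TemplateServiceName" convention handled above can be illustrated with a small, self-contained parser (the entry value is a made-up example):
package main
import (
	"fmt"
	"strings"
)
// parseServiceEntry splits a service-list entry; "Rank:TemplateRank" means
// "instantiate the TemplateRank template under the name Rank".
func parseServiceEntry(entry string) (serviceName, templateName string, isTemplate bool) {
	parts := strings.Split(entry, ":")
	if len(parts) == 2 {
		return parts[0], parts[1], true
	}
	return entry, "", false
}
func main() {
	name, tmpl, ok := parseServiceEntry("Rank:TemplateRank")
	fmt.Println(name, tmpl, ok) // Rank TemplateRank true
}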
func initLog() error {
logger := log.GetLogger()
if log.LogPath == "" {
setLogPath("./log")
err := setLogPath("./log")
if err != nil {
return err
}
}
localnodeinfo := cluster.GetCluster().GetLocalNodeInfo()
filepre := fmt.Sprintf("%s_%d_", localnodeinfo.NodeName, localnodeinfo.NodeId)
logger, err := log.NewTextLogger(log.LogLevel,log.LogPath,filepre,true,log.LogChannelCap)
if err != nil {
fmt.Printf("cannot create log file!\n")
return err
}
log.Export(logger)
localNodeInfo := cluster.GetCluster().GetLocalNodeInfo()
fileName := fmt.Sprintf("%s.log", localNodeInfo.NodeId)
logger.FileName = fileName
logger.Init()
return nil
}
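initLog above goes through the framework's log package; as a rough standalone equivalent (assuming only go.uber.org/zap, consistent with the zapcore levels used elsewhere in this changeset), a per-node logger writing to "<NodeId>.log" could look like this:
package main
import (
	"fmt"
	"os"
	"path/filepath"

	"go.uber.org/zap"
)
func newNodeLogger(logPath, nodeId string) (*zap.Logger, error) {
	// create the log directory if it does not exist yet
	if err := os.MkdirAll(logPath, os.ModePerm); err != nil {
		return nil, err
	}
	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{filepath.Join(logPath, fmt.Sprintf("%s.log", nodeId))}
	return cfg.Build()
}
func main() {
	logger, err := newNodeLogger("./log", "1")
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("node logger ready")
}
This sketch omits the file-prefix and rotation handling the framework's own logger adds on top.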
@@ -201,6 +243,34 @@ func Start() {
}
}
func retireNode(args interface{}) error {
//1. parse arguments
param := args.(string)
if param == "" {
return nil
}
sParam := strings.Split(param, "=")
if len(sParam) != 2 {
return fmt.Errorf("invalid option %s", param)
}
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nId := strings.TrimSpace(sParam[1])
if nId == "" {
return fmt.Errorf("invalid option %s", param)
}
processId, err := getRunProcessPid(nId)
if err != nil {
return err
}
RetireProcess(processId)
return nil
}
func stopNode(args interface{}) error {
//1. parse arguments
param := args.(string)
@@ -215,12 +285,12 @@ func stopNode(args interface{}) error {
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nodeId, err := strconv.Atoi(sParam[1])
if err != nil {
nId := strings.TrimSpace(sParam[1])
if nId == "" {
return fmt.Errorf("invalid option %s", param)
}
processId, err := getRunProcessPid(nodeId)
processId, err := getRunProcessPid(nId)
if err != nil {
return err
}
@@ -243,49 +313,88 @@ func startNode(args interface{}) error {
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nodeId, err := strconv.Atoi(sParam[1])
if err != nil {
strNodeId := strings.TrimSpace(sParam[1])
if strNodeId == "" {
return fmt.Errorf("invalid option %s", param)
}
timer.StartTimer(10*time.Millisecond, 1000000)
log.Info("Start running server.")
//2. initialize the node
initNode(nodeId)
for {
processId, pErr := getRunProcessPid(strNodeId)
if pErr != nil {
break
}
//3. run services
name, cErr := sysprocess.GetProcessNameByPID(int32(processId))
myName, mErr := sysprocess.GetMyProcessName()
//failed to get the current process name; this should never happen
if mErr != nil {
log.Error("get my process's name is error", log.ErrorField("err", mErr))
os.Exit(-1)
}
//an existing pid with the same process name is treated as a duplicate run of this node
if cErr == nil && name == myName {
log.Error("repeat runs are not allowed", log.String("nodeId", strNodeId), log.Int("processId", processId))
os.Exit(-1)
}
break
}
//2. record the process id
writeProcessPid(strNodeId)
timer.StartTimer(10*time.Millisecond, 1000000)
//3. initialize the node
defer log.GetLogger().Logger.Sync()
initNode(strNodeId)
//4. run services
service.Start()
//4. run the cluster
cluster.GetCluster().Start()
//5. record the process id
writeProcessPid(nodeId)
//5. run the cluster
err := cluster.GetCluster().Start()
if err != nil {
log.Error(err.Error())
os.Exit(-1)
}
//6. listen for exit signals & profiler reports
bRun := true
var pProfilerTicker *time.Ticker = &time.Ticker{}
if profilerInterval > 0 {
pProfilerTicker = time.NewTicker(profilerInterval)
}
for bRun {
NodeIsRun = true
for NodeIsRun {
select {
case <-sig:
log.Info("receipt stop signal.")
bRun = false
case s := <-sig:
signal := s.(syscall.Signal)
if signal == SignalRetire {
log.Info("receipt retire signal.")
notifyAllServiceRetire()
} else {
NodeIsRun = false
log.Info("receipt stop signal.")
}
case <-pProfilerTicker.C:
profiler.Report()
}
}
cluster.GetCluster().Stop()
//7. shut down
service.StopAllService()
cluster.GetCluster().Stop()
log.Info("Server is stop.")
log.Close()
return nil
}
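The duplicate-run guard in startNode relies on the framework's sysprocess helpers; a rough standard-library approximation of the underlying idea (Unix only, helper name is hypothetical) is:
package main
import (
	"fmt"
	"os"
	"syscall"
)
// processAlive reports whether a process with the given pid still exists.
// Sending signal 0 performs the existence/permission check without actually
// delivering a signal.
func processAlive(pid int) bool {
	p, err := os.FindProcess(pid)
	if err != nil {
		return false
	}
	return p.Signal(syscall.Signal(0)) == nil
}
func main() {
	fmt.Println(processAlive(os.Getpid())) // true for our own pid
}
The real check is stricter: it also compares the process name behind the recorded pid with the current executable's name before refusing to start.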
type templateServicePoint[T any] interface {
*T
service.IService
}
func Setup(s ...service.IService) {
for _, sv := range s {
sv.OnSetup(sv)
@@ -293,6 +402,19 @@ func Setup(s ...service.IService) {
}
}
func SetupTemplateFunc(fs ...func() service.IService) {
for _, f := range fs {
preSetupTemplateService = append(preSetupTemplateService, f)
}
}
func SetupTemplate[T any, P templateServicePoint[T]]() {
SetupTemplateFunc(func() service.IService {
var t T
return P(&t)
})
}
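A hedged usage sketch of the generic registration above. MyTemplateService is hypothetical, and embedding service.Service as the base implementation is an assumption; the only requirement the constraint actually expresses is that *MyTemplateService implements service.IService:
package main
import (
	"github.com/duanhf2012/origin/v2/node"
	"github.com/duanhf2012/origin/v2/service"
)
// MyTemplateService is a hypothetical template service.
type MyTemplateService struct {
	service.Service // assumption: framework base type providing IService
}
func (m *MyTemplateService) OnInit() error { return nil }
func main() {
	// Register the template; a node config entry such as "Rank:MyTemplateService"
	// would then create a fresh instance named "Rank" through the factory that
	// SetupTemplate stores in preSetupTemplateService.
	node.SetupTemplate[MyTemplateService]()
	node.Start()
}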
func GetService(serviceName string) service.IService {
return service.GetService(serviceName)
}
@@ -316,11 +438,13 @@ func openConsole(args interface{}) error {
}
strOpen := strings.ToLower(strings.TrimSpace(args.(string)))
if strOpen == "false" {
log.OpenConsole = false
bOpenConsole := false
log.OpenConsole = &bOpenConsole
} else if strOpen == "true" {
log.OpenConsole = true
bOpenConsole := true
log.OpenConsole = &bOpenConsole
} else {
return errors.New("Parameter console error!")
return errors.New("parameter console error")
}
return nil
}
@@ -332,20 +456,18 @@ func setLevel(args interface{}) error {
strlogLevel := strings.TrimSpace(args.(string))
switch strlogLevel {
case "trace":
log.LogLevel = log.LevelTrace
case "debug":
log.LogLevel = log.LevelDebug
log.LogLevel = zapcore.DebugLevel
case "info":
log.LogLevel = log.LevelInfo
case "warning":
log.LogLevel = log.LevelWarning
log.LogLevel = zapcore.InfoLevel
case "warn":
log.LogLevel = zapcore.WarnLevel
case "error":
log.LogLevel = log.LevelError
case "stack":
log.LogLevel = log.LevelStack
log.LogLevel = zapcore.ErrorLevel
case "stackerror":
log.LogLevel = zapcore.ErrorLevel
case "fatal":
log.LogLevel = log.LevelFatal
log.LogLevel = zapcore.FatalLevel
default:
return errors.New("unknown level: " + strlogLevel)
}
@@ -356,48 +478,42 @@ func setLogPath(args interface{}) error {
if args == "" {
return nil
}
log.LogPath = strings.TrimSpace(args.(string))
dir, err := os.Stat(log.LogPath) //这个文件夹不存在
if err == nil && dir.IsDir() == false {
return errors.New("Not found dir " + log.LogPath)
}
logPath := strings.TrimSpace(args.(string))
_, err := os.Stat(logPath)
if err != nil {
err = os.Mkdir(log.LogPath, os.ModePerm)
err = os.MkdirAll(logPath, os.ModePerm)
if err != nil {
return errors.New("Cannot create dir " + log.LogPath)
return errors.New("Cannot create dir " + logPath)
}
}
log.LogPath = logPath
return nil
}
func setLogSize(args interface{}) error {
if args == "" {
logSize, ok := args.(int)
if ok == false {
return errors.New("param logsize is error")
}
if logSize == 0 {
return nil
}
logSize,ok := args.(int)
if ok == false{
return errors.New("param logsize is error")
}
log.LogSize = int64(logSize)*1024*1024
log.MaxSize = logSize
return nil
}
func setLogChannelCapNum(args interface{}) error {
if args == "" {
func setLogChanLen(args interface{}) error {
logChanLen, ok := args.(int)
if ok == false {
return errors.New("param logsize is error")
}
if logChanLen == 0 {
return nil
}
logChannelCap,ok := args.(int)
if ok == false{
return errors.New("param logsize is error")
}
log.LogChannelCap = logChannelCap
log.LogChanLen = logChanLen
return nil
}

View File

@@ -8,7 +8,7 @@ import (
)
func KillProcess(processId int){
err := syscall.Kill(processId,syscall.Signal(10))
err := syscall.Kill(processId,SingleStop)
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
}else{
@@ -19,3 +19,12 @@ func KillProcess(processId int){
func GetBuildOSType() BuildOSType{
return Linux
}
func RetireProcess(processId int){
err := syscall.Kill(processId,SignalRetire)
if err != nil {
fmt.Printf("retire processid %d is fail:%+v.\n",processId,err)
}else{
fmt.Printf("retire processid %d is successful.\n",processId)
}
}

View File

@@ -8,7 +8,7 @@ import (
)
func KillProcess(processId int){
err := syscall.Kill(processId,syscall.Signal(10))
err := syscall.Kill(processId,SingleStop)
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
}else{
@@ -19,3 +19,12 @@ func KillProcess(processId int){
func GetBuildOSType() BuildOSType{
return Mac
}
func RetireProcess(processId int){
err := syscall.Kill(processId,SignalRetire)
if err != nil {
fmt.Printf("retire processid %d is fail:%+v.\n",processId,err)
}else{
fmt.Printf("retire processid %d is successful.\n",processId)
}
}

View File

@@ -2,10 +2,28 @@
package node
func KillProcess(processId int){
import (
"os"
"fmt"
)
func KillProcess(processId int){
procss,err := os.FindProcess(processId)
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
return
}
err = procss.Kill()
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
}
}
func GetBuildOSType() BuildOSType{
return Windows
}
func RetireProcess(processId int){
fmt.Printf("This command does not support Windows")
}

View File

@@ -2,100 +2,103 @@ package profiler
import (
"container/list"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"sync"
"time"
)
//maximum overtime; usually indicates a deadlock, an infinite loop, or a severe performance problem
var DefaultMaxOvertime time.Duration = 5*time.Second
//calls longer than this are recorded in the monitoring report
var DefaultOvertime time.Duration = 10*time.Millisecond
var DefaultMaxRecordNum int = 100 //maximum number of records
// DefaultMaxOvertime maximum overtime; usually indicates a deadlock, an infinite loop, or a severe performance problem
var DefaultMaxOvertime = 5 * time.Second
// DefaultOvertime calls longer than this are recorded in the monitoring report
var DefaultOvertime = 10 * time.Millisecond
var DefaultMaxRecordNum = 100 //maximum number of records
var mapProfiler map[string]*Profiler
type ReportFunType func(name string,callNum int,costTime time.Duration,record *list.List)
var reportFunc ReportFunType =DefaultReportFunction
type ReportFunType func(name string, callNum int, costTime time.Duration, record *list.List)
var reportFunc ReportFunType = DefaultReportFunction
type Element struct {
tagName string
tagName string
pushTime time.Time
}
type RecordType int
const (
const (
MaxOvertimeType = 1
OvertimeType =2
)
OvertimeType = 2
)
type Record struct {
RType RecordType
CostTime time.Duration
RType RecordType
CostTime time.Duration
RecordName string
}
type Analyzer struct {
elem *list.Element
elem *list.Element
profiler *Profiler
}
type Profiler struct {
stack *list.List //Element
stack *list.List //Element
stackLocker sync.RWMutex
mapAnalyzer map[*list.Element]Analyzer
record *list.List //Record
record *list.List //Record
callNum int //number of calls
callNum int //number of calls
totalCostTime time.Duration //total time spent
maxOverTime time.Duration
overTime time.Duration
maxOverTime time.Duration
overTime time.Duration
maxRecordNum int
}
func init(){
func init() {
mapProfiler = map[string]*Profiler{}
}
func RegProfiler(profilerName string) *Profiler {
if _,ok :=mapProfiler[profilerName];ok==true {
if _, ok := mapProfiler[profilerName]; ok == true {
return nil
}
pProfiler := &Profiler{stack:list.New(),record:list.New(),maxOverTime: DefaultMaxOvertime,overTime: DefaultOvertime}
mapProfiler[profilerName] =pProfiler
pProfiler := &Profiler{stack: list.New(), record: list.New(), maxOverTime: DefaultMaxOvertime, overTime: DefaultOvertime}
mapProfiler[profilerName] = pProfiler
return pProfiler
}
func (slf *Profiler) SetMaxOverTime(tm time.Duration){
func (slf *Profiler) SetMaxOverTime(tm time.Duration) {
slf.maxOverTime = tm
}
func (slf *Profiler) SetOverTime(tm time.Duration){
func (slf *Profiler) SetOverTime(tm time.Duration) {
slf.overTime = tm
}
func (slf *Profiler) SetMaxRecordNum(num int){
func (slf *Profiler) SetMaxRecordNum(num int) {
slf.maxRecordNum = num
}
func (slf *Profiler) Push(tag string) *Analyzer{
func (slf *Profiler) Push(tag string) *Analyzer {
slf.stackLocker.Lock()
defer slf.stackLocker.Unlock()
pElem := slf.stack.PushBack(&Element{tagName:tag,pushTime:time.Now()})
pElem := slf.stack.PushBack(&Element{tagName: tag, pushTime: time.Now()})
return &Analyzer{elem:pElem,profiler:slf}
return &Analyzer{elem: pElem, profiler: slf}
}
func (slf *Profiler) check(pElem *Element) (*Record,time.Duration) {
func (slf *Profiler) check(pElem *Element) (*Record, time.Duration) {
if pElem == nil {
return nil,0
return nil, 0
}
subTm := time.Now().Sub(pElem.pushTime)
if subTm < slf.overTime {
return nil,subTm
return nil, subTm
}
record := Record{
@@ -104,20 +107,20 @@ func (slf *Profiler) check(pElem *Element) (*Record,time.Duration) {
RecordName: pElem.tagName,
}
if subTm>slf.maxOverTime {
if subTm > slf.maxOverTime {
record.RType = MaxOvertimeType
}
return &record,subTm
return &record, subTm
}
func (slf *Analyzer) Pop(){
func (slf *Analyzer) Pop() {
slf.profiler.stackLocker.Lock()
defer slf.profiler.stackLocker.Unlock()
pElement := slf.elem.Value.(*Element)
pElem,subTm := slf.profiler.check(pElement)
slf.profiler.callNum+=1
pElem, subTm := slf.profiler.check(pElement)
slf.profiler.callNum += 1
slf.profiler.totalCostTime += subTm
if pElem != nil {
slf.profiler.pushRecordLog(pElem)
@@ -125,10 +128,10 @@ func (slf *Analyzer) Pop(){
slf.profiler.stack.Remove(slf.elem)
}
func (slf *Profiler) pushRecordLog(record *Record){
if slf.record.Len()>= DefaultMaxRecordNum {
func (slf *Profiler) pushRecordLog(record *Record) {
if slf.record.Len() >= DefaultMaxRecordNum {
front := slf.stack.Front()
if front!=nil {
if front != nil {
slf.stack.Remove(front)
}
}
@@ -140,47 +143,43 @@ func SetReportFunction(reportFun ReportFunType) {
reportFunc = reportFun
}
func DefaultReportFunction(name string,callNum int,costTime time.Duration,record *list.List){
if record.Len()<=0 {
func DefaultReportFunction(name string, callNum int, costTime time.Duration, record *list.List) {
if record.Len() <= 0 {
return
}
var strReport string
strReport = "Profiler report tag "+name+":\n"
var average int64
if callNum>0 {
average = costTime.Milliseconds()/int64(callNum)
if callNum > 0 {
average = costTime.Milliseconds() / int64(callNum)
}
strReport += fmt.Sprintf("process count %d,take time %d Milliseconds,average %d Milliseconds/per.\n",callNum,costTime.Milliseconds(),average)
log.Info("Profiler report tag "+name, log.Int("process count", callNum), log.Int64("take time", costTime.Milliseconds()), log.Int64("average", average))
elem := record.Front()
var strTypes string
for elem!=nil {
for elem != nil {
pRecord := elem.Value.(*Record)
if pRecord.RType == MaxOvertimeType {
if pRecord.RType == MaxOvertimeType {
strTypes = "too slow process"
}else{
} else {
strTypes = "slow process"
}
strReport += fmt.Sprintf("%s:%s is take %d Milliseconds\n",strTypes,pRecord.RecordName,pRecord.CostTime.Milliseconds())
log.Info("Profiler report type", log.String("Types", strTypes), log.String("RecordName", pRecord.RecordName), log.Int64("take time", pRecord.CostTime.Milliseconds()))
elem = elem.Next()
}
log.SInfo("report",strReport)
}
func Report() {
var record *list.List
for name,prof := range mapProfiler{
for name, prof := range mapProfiler {
prof.stackLocker.RLock()
//check the top of the stack for entries already exceeding MaxOverTime
pElem := prof.stack.Back()
for pElem!=nil {
for pElem != nil {
pElement := pElem.Value.(*Element)
pExceptionElem,_ := prof.check(pElement)
if pExceptionElem!=nil {
pExceptionElem, _ := prof.check(pElement)
if pExceptionElem != nil {
prof.pushRecordLog(pExceptionElem)
}
pElem = pElem.Prev()
@@ -197,7 +196,6 @@ func Report() {
totalCostTime := prof.totalCostTime
prof.stackLocker.RUnlock()
DefaultReportFunction(name,callNum,totalCostTime,record)
DefaultReportFunction(name, callNum, totalCostTime, record)
}
}
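A short usage sketch of the profiler API defined in this file (import path omitted; RegProfiler, Push and Pop are the functions shown above):
var prof = RegProfiler("GameService") // returns nil if the name is already registered
func handleMessage(msgName string, work func()) {
	a := prof.Push(msgName) // stamp the entry time
	defer a.Pop()           // on exit, anything slower than overTime is recorded
	work()
}
Report() is then invoked periodically (the node's profiler ticker does this) to emit the collected slow and too-slow records.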

144
rpc/callset.go Normal file
View File

@@ -0,0 +1,144 @@
package rpc
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"strconv"
"sync"
"sync/atomic"
"time"
)
type CallSet struct {
pendingLock sync.RWMutex
startSeq uint64
pending map[uint64]*Call
callRpcTimeout time.Duration
maxCheckCallRpcCount int
callTimerHeap CallTimerHeap
}
func (cs *CallSet) Init() {
cs.pendingLock.Lock()
cs.callTimerHeap.Init()
cs.pending = make(map[uint64]*Call, 4096)
cs.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
cs.callRpcTimeout = DefaultRpcTimeout
go cs.checkRpcCallTimeout()
cs.pendingLock.Unlock()
}
func (cs *CallSet) makeCallFail(call *Call) {
if call.callback != nil && call.callback.IsValid() {
call.rpcHandler.PushRpcResponse(call)
} else {
call.done <- call
}
}
func (cs *CallSet) checkRpcCallTimeout() {
for {
time.Sleep(DefaultCheckRpcCallTimeoutInterval)
for i := 0; i < cs.maxCheckCallRpcCount; i++ {
cs.pendingLock.Lock()
callSeq := cs.callTimerHeap.PopTimeout()
if callSeq == 0 {
cs.pendingLock.Unlock()
break
}
pCall := cs.pending[callSeq]
if pCall == nil {
cs.pendingLock.Unlock()
log.Error("call seq is not find", log.Uint64("seq", callSeq))
continue
}
delete(cs.pending, callSeq)
strTimeout := strconv.FormatInt(int64(pCall.TimeOut.Seconds()), 10)
pCall.Err = errors.New("RPC call takes more than " + strTimeout + " seconds,method is " + pCall.ServiceMethod)
log.Error("call timeout", log.String("error", pCall.Err.Error()))
cs.makeCallFail(pCall)
cs.pendingLock.Unlock()
continue
}
}
}
func (cs *CallSet) AddPending(call *Call) {
cs.pendingLock.Lock()
if call.Seq == 0 {
cs.pendingLock.Unlock()
log.StackError("call is error.")
return
}
cs.pending[call.Seq] = call
cs.callTimerHeap.AddTimer(call.Seq, call.TimeOut)
cs.pendingLock.Unlock()
}
func (cs *CallSet) RemovePending(seq uint64) *Call {
if seq == 0 {
return nil
}
cs.pendingLock.Lock()
call := cs.removePending(seq)
cs.pendingLock.Unlock()
return call
}
func (cs *CallSet) removePending(seq uint64) *Call {
v, ok := cs.pending[seq]
if ok == false {
return nil
}
cs.callTimerHeap.Cancel(seq)
delete(cs.pending, seq)
return v
}
func (cs *CallSet) FindPending(seq uint64) (pCall *Call) {
if seq == 0 {
return nil
}
cs.pendingLock.Lock()
pCall = cs.pending[seq]
cs.pendingLock.Unlock()
return pCall
}
func (cs *CallSet) cleanPending() {
cs.pendingLock.Lock()
for {
callSeq := cs.callTimerHeap.PopFirst()
if callSeq == 0 {
break
}
pCall := cs.pending[callSeq]
if pCall == nil {
log.Error("call Seq is not find", log.Uint64("seq", callSeq))
continue
}
delete(cs.pending, callSeq)
pCall.Err = errors.New("node is disconnect ")
cs.makeCallFail(pCall)
}
cs.pendingLock.Unlock()
}
func (cs *CallSet) generateSeq() uint64 {
return atomic.AddUint64(&cs.startSeq, 1)
}
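CallSet is essentially a registry of in-flight calls plus a timeout sweeper. A simplified, standard-library-only illustration of that pattern (the real code uses a CallTimerHeap instead of scanning the map, and integrates with the Call/RpcHandler types):
package main
import (
	"errors"
	"fmt"
	"sync"
	"time"
)
var errTimeout = errors.New("rpc call timeout")
// pendingCall is a simplified stand-in for *Call.
type pendingCall struct {
	seq      uint64
	deadline time.Time
	done     chan error
}
type pendingSet struct {
	mu      sync.Mutex
	pending map[uint64]*pendingCall
	nextSeq uint64
}
func newPendingSet() *pendingSet {
	ps := &pendingSet{pending: make(map[uint64]*pendingCall)}
	go ps.expireLoop(time.Second)
	return ps
}
// add registers a new in-flight call with a deadline.
func (ps *pendingSet) add(timeout time.Duration) *pendingCall {
	ps.mu.Lock()
	defer ps.mu.Unlock()
	ps.nextSeq++
	c := &pendingCall{seq: ps.nextSeq, deadline: time.Now().Add(timeout), done: make(chan error, 1)}
	ps.pending[c.seq] = c
	return c
}
// complete resolves a call when its response arrives.
func (ps *pendingSet) complete(seq uint64, err error) {
	ps.mu.Lock()
	c := ps.pending[seq]
	delete(ps.pending, seq)
	ps.mu.Unlock()
	if c != nil {
		c.done <- err
	}
}
// expireLoop fails any call whose deadline has passed.
func (ps *pendingSet) expireLoop(interval time.Duration) {
	for {
		time.Sleep(interval)
		now := time.Now()
		ps.mu.Lock()
		for seq, c := range ps.pending {
			if now.After(c.deadline) {
				delete(ps.pending, seq)
				c.done <- errTimeout
			}
		}
		ps.mu.Unlock()
	}
}
func main() {
	ps := newPendingSet()
	call := ps.add(2 * time.Second)
	go ps.complete(call.seq, nil) // simulate a response arriving in time
	fmt.Println("call finished with error:", <-call.done)
}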

View File

@@ -2,184 +2,269 @@ package rpc
import (
"errors"
"github.com/duanhf2012/origin/network"
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"reflect"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/duanhf2012/origin/log"
)
const(
const (
DefaultRpcConnNum = 1
DefaultRpcLenMsgLen = 4
DefaultRpcMinMsgLen = 2
DefaultMaxCheckCallRpcCount = 1000
DefaultMaxPendingWriteNum = 200000
DefaultMaxPendingWriteNum = 1000000
DefaultConnectInterval = 2*time.Second
DefaultCheckRpcCallTimeoutInterval = 1*time.Second
DefaultRpcTimeout = 15*time.Second
DefaultConnectInterval = 2 * time.Second
DefaultCheckRpcCallTimeoutInterval = 1 * time.Second
DefaultRpcTimeout = 15 * time.Second
)
var clientSeq uint32
type IWriter interface {
WriteMsg(nodeId string, args ...[]byte) error
IsConnected() bool
}
type IRealClient interface {
SetConn(conn *network.TCPConn)
SetConn(conn *network.NetConn)
Close(waitDone bool)
AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error)
Go(timeout time.Duration,rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call
RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call
AsyncCall(NodeId string, timeout time.Duration, rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) (CancelRpc, error)
Go(NodeId string, timeout time.Duration, rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call
RawGo(NodeId string, timeout time.Duration, rpcHandler IRpcHandler, processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call
IsConnected() bool
Run()
OnClose()
Bind(server IServer)
}
type Client struct {
clientId uint32
nodeId int
pendingLock sync.RWMutex
startSeq uint64
pending map[uint64]*Call
callRpcTimeout time.Duration
maxCheckCallRpcCount int
clientId uint32
targetNodeId string
compressBytesLen int
callTimerHeap CallTimerHeap
*CallSet
IRealClient
}
func (client *Client) NewClientAgent(conn *network.TCPConn) network.Agent {
func (client *Client) NewClientAgent(conn *network.NetConn) network.Agent {
client.SetConn(conn)
return client
}
func (bc *Client) makeCallFail(call *Call) {
if call.callback != nil && call.callback.IsValid() {
call.rpcHandler.PushRpcResponse(call)
} else {
call.done <- call
}
}
func (bc *Client) checkRpcCallTimeout() {
for{
time.Sleep(DefaultCheckRpcCallTimeoutInterval)
for i := 0; i < bc.maxCheckCallRpcCount; i++ {
bc.pendingLock.Lock()
callSeq := bc.callTimerHeap.PopTimeout()
if callSeq == 0 {
bc.pendingLock.Unlock()
break
}
pCall := bc.pending[callSeq]
if pCall == nil {
bc.pendingLock.Unlock()
log.Error("call seq is not find",log.Uint64("seq", callSeq))
continue
}
delete(bc.pending,callSeq)
strTimeout := strconv.FormatInt(int64(pCall.TimeOut.Seconds()), 10)
pCall.Err = errors.New("RPC call takes more than " + strTimeout + " seconds,method is "+pCall.ServiceMethod)
log.Error("call timeout",log.String("error",pCall.Err.Error()))
bc.makeCallFail(pCall)
bc.pendingLock.Unlock()
continue
}
}
}
func (client *Client) InitPending() {
client.pendingLock.Lock()
client.callTimerHeap.Init()
client.pending = make(map[uint64]*Call,4096)
client.pendingLock.Unlock()
}
func (bc *Client) AddPending(call *Call) {
bc.pendingLock.Lock()
if call.Seq == 0 {
bc.pendingLock.Unlock()
log.Stack("call is error.")
return
}
bc.pending[call.Seq] = call
bc.callTimerHeap.AddTimer(call.Seq,call.TimeOut)
bc.pendingLock.Unlock()
}
func (bc *Client) RemovePending(seq uint64) *Call {
if seq == 0 {
return nil
}
bc.pendingLock.Lock()
call := bc.removePending(seq)
bc.pendingLock.Unlock()
return call
}
func (bc *Client) removePending(seq uint64) *Call {
v, ok := bc.pending[seq]
if ok == false {
return nil
}
bc.callTimerHeap.Cancel(seq)
delete(bc.pending, seq)
return v
}
func (bc *Client) FindPending(seq uint64) (pCall *Call) {
if seq == 0 {
return nil
}
bc.pendingLock.Lock()
pCall = bc.pending[seq]
bc.pendingLock.Unlock()
return pCall
}
func (bc *Client) cleanPending(){
bc.pendingLock.Lock()
for {
callSeq := bc.callTimerHeap.PopFirst()
if callSeq == 0 {
break
}
pCall := bc.pending[callSeq]
if pCall == nil {
log.Error("call Seq is not find",log.Uint64("seq",callSeq))
continue
}
delete(bc.pending,callSeq)
pCall.Err = errors.New("nodeid is disconnect ")
bc.makeCallFail(pCall)
}
bc.pendingLock.Unlock()
}
func (bc *Client) generateSeq() uint64 {
return atomic.AddUint64(&bc.startSeq, 1)
}
func (client *Client) GetNodeId() int {
return client.nodeId
func (client *Client) GetTargetNodeId() string {
return client.targetNodeId
}
func (client *Client) GetClientId() uint32 {
return client.clientId
}
func (client *Client) processRpcResponse(responseData []byte) error {
bCompress := (responseData[0] >> 7) > 0
processor := GetProcessor(responseData[0] & 0x7f)
if processor == nil {
//rc.conn.ReleaseReadMsg(responseData)
err := errors.New(fmt.Sprintf("cannot find process %d", responseData[0]&0x7f))
log.Error(err.Error())
return err
}
//1. parse the head
response := RpcResponse{}
response.RpcResponseData = processor.MakeRpcResponse(0, "", nil)
//decompress
byteData := responseData[1:]
var compressBuff []byte
if bCompress == true {
var unCompressErr error
compressBuff, unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr != nil {
//rc.conn.ReleaseReadMsg(responseData)
err := fmt.Errorf("uncompressBlock failed,err :%s", unCompressErr.Error())
return err
}
byteData = compressBuff
}
err := processor.Unmarshal(byteData, response.RpcResponseData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
//rc.conn.ReleaseReadMsg(bytes)
if err != nil {
processor.ReleaseRpcResponse(response.RpcResponseData)
log.Error("rpcClient Unmarshal head error", log.ErrorField("error", err))
return nil
}
v := client.RemovePending(response.RpcResponseData.GetSeq())
if v == nil {
log.Error("rpcClient cannot find seq", log.Uint64("seq", response.RpcResponseData.GetSeq()))
} else {
v.Err = nil
if len(response.RpcResponseData.GetReply()) > 0 {
err = processor.Unmarshal(response.RpcResponseData.GetReply(), v.Reply)
if err != nil {
log.Error("rpcClient Unmarshal body failed", log.ErrorField("error", err))
v.Err = err
}
}
if response.RpcResponseData.GetErr() != nil {
v.Err = response.RpcResponseData.GetErr()
}
if v.callback != nil && v.callback.IsValid() {
v.rpcHandler.PushRpcResponse(v)
} else {
v.done <- v
}
}
processor.ReleaseRpcResponse(response.RpcResponseData)
return nil
}
//func (rc *Client) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
// _, processor := GetProcessorType(args)
// InParam, err := processor.Marshal(args)
// if err != nil {
// log.Error("Marshal is fail",log.ErrorAttr("error",err))
// call := MakeCall()
// call.DoError(err)
// return call
// }
//
// return rc.RawGo(timeout,rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
//}
func (client *Client) rawGo(nodeId string, w IWriter, timeout time.Duration, rpcHandler IRpcHandler, processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
call := MakeCall()
call.ServiceMethod = serviceMethod
call.Reply = reply
call.Seq = client.generateSeq()
call.TimeOut = timeout
request := MakeRpcRequest(processor, call.Seq, rpcMethodId, serviceMethod, noReply, rawArgs)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
call.Seq = 0
log.Error("marshal is fail", log.String("error", err.Error()))
call.DoError(err)
return call
}
if w == nil || w.IsConnected() == false {
call.Seq = 0
sErr := errors.New(serviceMethod + " was called failed,rpc client is disconnect")
log.Error("conn is disconnect", log.String("error", sErr.Error()))
call.DoError(sErr)
return call
}
var compressBuff []byte
bCompress := uint8(0)
if client.compressBytesLen > 0 && len(bytes) >= client.compressBytesLen {
var cErr error
compressBuff, cErr = compressor.CompressBlock(bytes)
if cErr != nil {
call.Seq = 0
log.Error("compress fail", log.String("error", cErr.Error()))
call.DoError(cErr)
return call
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1 << 7
}
}
if noReply == false {
client.AddPending(call)
}
err = w.WriteMsg(nodeId, []byte{uint8(processor.GetProcessorType()) | bCompress}, bytes)
if cap(compressBuff) > 0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
client.RemovePending(call.Seq)
log.Error("WriteMsg is fail", log.ErrorField("error", err))
call.Seq = 0
call.DoError(err)
}
return call
}
func (client *Client) asyncCall(nodeId string, w IWriter, timeout time.Duration, rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) (CancelRpc, error) {
processorType, processor := GetProcessorType(args)
InParam, herr := processor.Marshal(args)
if herr != nil {
return emptyCancelRpc, herr
}
seq := client.generateSeq()
request := MakeRpcRequest(processor, seq, 0, serviceMethod, false, InParam)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
return emptyCancelRpc, err
}
if w == nil || w.IsConnected() == false {
return emptyCancelRpc, errors.New("Rpc server is disconnect,call " + serviceMethod)
}
var compressBuff []byte
bCompress := uint8(0)
if client.compressBytesLen > 0 && len(bytes) >= client.compressBytesLen {
var cErr error
compressBuff, cErr = compressor.CompressBlock(bytes)
if cErr != nil {
return emptyCancelRpc, cErr
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1 << 7
}
}
call := MakeCall()
call.Reply = replyParam
call.callback = &callback
call.rpcHandler = rpcHandler
call.ServiceMethod = serviceMethod
call.Seq = seq
call.TimeOut = timeout
client.AddPending(call)
err = w.WriteMsg(nodeId, []byte{uint8(processorType) | bCompress}, bytes)
if cap(compressBuff) > 0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
client.RemovePending(call.Seq)
ReleaseCall(call)
return emptyCancelRpc, err
}
rpcCancel := RpcCancel{CallSeq: seq, Cli: client}
return rpcCancel.CancelRpc, nil
}
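Both rawGo and asyncCall prepend a single header byte before the serialized request: bit 7 flags lz4 compression and bits 0-6 carry the processor (serializer) type, which is why the readers mask with 0x7f. A tiny self-contained illustration:
package main
import "fmt"
func encodeHeader(processorType uint8, compressed bool) byte {
	b := processorType & 0x7f
	if compressed {
		b |= 1 << 7
	}
	return b
}
func decodeHeader(b byte) (compressed bool, processorType uint8) {
	return b>>7 > 0, b & 0x7f
}
func main() {
	h := encodeHeader(1, true)
	c, p := decodeHeader(h)
	fmt.Println(c, p) // true 1
}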

View File

@@ -3,27 +3,28 @@ package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/util/bytespool"
"github.com/duanhf2012/origin/v2/util/bytespool"
"github.com/pierrec/lz4/v4"
"runtime"
)
var memPool bytespool.IBytesMempool = bytespool.NewMemAreaPool()
var memPool bytespool.IBytesMemPool = bytespool.NewMemAreaPool()
type ICompressor interface {
CompressBlock(src []byte) ([]byte, error) //if dst is pre-allocated it is reused; pass nil to allocate internally
UncompressBlock(src []byte) ([]byte, error) //if dst is pre-allocated it is reused; pass nil to allocate internally
CompressBufferCollection(buffer []byte) //recycle the compressed buffer
UnCompressBufferCollection(buffer []byte) //recycle the decompressed buffer
CompressBufferCollection(buffer []byte) //recycle the compressed buffer
UnCompressBufferCollection(buffer []byte) //recycle the decompressed buffer
}
var compressor ICompressor
func init(){
func init() {
SetCompressor(&Lz4Compressor{})
}
func SetCompressor(cp ICompressor){
func SetCompressor(cp ICompressor) {
compressor = cp
}
@@ -42,21 +43,21 @@ func (lc *Lz4Compressor) CompressBlock(src []byte) (dest []byte, err error) {
var c lz4.Compressor
var cnt int
dest = memPool.MakeBytes(lz4.CompressBlockBound(len(src))+1)
dest = memPool.MakeBytes(lz4.CompressBlockBound(len(src)) + 1)
cnt, err = c.CompressBlock(src, dest[1:])
if err != nil {
memPool.ReleaseBytes(dest)
return nil,err
return nil, err
}
ratio := len(src) / cnt
if len(src) % cnt > 0 {
if len(src)%cnt > 0 {
ratio += 1
}
if ratio > 255 {
memPool.ReleaseBytes(dest)
return nil,fmt.Errorf("Impermissible errors")
return nil, fmt.Errorf("impermissible errors")
}
dest[0] = uint8(ratio)
@@ -76,24 +77,24 @@ func (lc *Lz4Compressor) UncompressBlock(src []byte) (dest []byte, err error) {
radio := uint8(src[0])
if radio == 0 {
return nil,fmt.Errorf("Impermissible errors")
return nil, fmt.Errorf("impermissible errors")
}
dest = memPool.MakeBytes(len(src)*int(radio))
dest = memPool.MakeBytes(len(src) * int(radio))
cnt, err := lz4.UncompressBlock(src[1:], dest)
if err != nil {
memPool.ReleaseBytes(dest)
return nil,err
return nil, err
}
return dest[:cnt],nil
return dest[:cnt], nil
}
func (lc *Lz4Compressor) compressBlockBound(n int) int{
func (lc *Lz4Compressor) compressBlockBound(n int) int {
return lz4.CompressBlockBound(n)
}
func (lc *Lz4Compressor) CompressBufferCollection(buffer []byte){
func (lc *Lz4Compressor) CompressBufferCollection(buffer []byte) {
memPool.ReleaseBytes(buffer)
}
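The Lz4Compressor frames each block with one leading byte holding the rounded-up expansion ratio, so the decoder can size its output buffer as len(block) * ratio. A standalone sketch of that framing, assuming github.com/pierrec/lz4/v4 (the library used above); the framework additionally draws these buffers from a bytes pool, which the sketch omits:
package main
import (
	"fmt"

	"github.com/pierrec/lz4/v4"
)
func compressWithRatio(src []byte) ([]byte, error) {
	var c lz4.Compressor
	dst := make([]byte, lz4.CompressBlockBound(len(src))+1)
	n, err := c.CompressBlock(src, dst[1:])
	if err != nil {
		return nil, err
	}
	if n == 0 {
		return nil, fmt.Errorf("data is not compressible")
	}
	ratio := len(src) / n
	if len(src)%n > 0 {
		ratio++
	}
	if ratio > 255 {
		return nil, fmt.Errorf("expansion ratio does not fit in one byte")
	}
	dst[0] = uint8(ratio) // the decoder sizes its buffer as len(block)*ratio
	return dst[:n+1], nil
}
func decompressWithRatio(block []byte) ([]byte, error) {
	ratio := int(block[0])
	if ratio == 0 {
		return nil, fmt.Errorf("invalid ratio byte")
	}
	dst := make([]byte, len(block)*ratio)
	n, err := lz4.UncompressBlock(block[1:], dst)
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}
func main() {
	data := []byte("hello hello hello hello hello hello hello hello")
	block, err := compressWithRatio(data)
	if err != nil {
		panic(err)
	}
	out, _ := decompressWithRatio(block)
	fmt.Println(string(out) == string(data)) // true
}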

View File

@@ -1,408 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v3.11.4
// source: test/rpc/dynamicdiscover.proto
package rpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NodeInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId int32 `protobuf:"varint,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
NodeName string `protobuf:"bytes,2,opt,name=NodeName,proto3" json:"NodeName,omitempty"`
ListenAddr string `protobuf:"bytes,3,opt,name=ListenAddr,proto3" json:"ListenAddr,omitempty"`
MaxRpcParamLen uint32 `protobuf:"varint,4,opt,name=MaxRpcParamLen,proto3" json:"MaxRpcParamLen,omitempty"`
Private bool `protobuf:"varint,5,opt,name=Private,proto3" json:"Private,omitempty"`
PublicServiceList []string `protobuf:"bytes,6,rep,name=PublicServiceList,proto3" json:"PublicServiceList,omitempty"`
}
func (x *NodeInfo) Reset() {
*x = NodeInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeInfo) ProtoMessage() {}
func (x *NodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
func (*NodeInfo) Descriptor() ([]byte, []int) {
return file_test_rpc_dynamicdiscover_proto_rawDescGZIP(), []int{0}
}
func (x *NodeInfo) GetNodeId() int32 {
if x != nil {
return x.NodeId
}
return 0
}
func (x *NodeInfo) GetNodeName() string {
if x != nil {
return x.NodeName
}
return ""
}
func (x *NodeInfo) GetListenAddr() string {
if x != nil {
return x.ListenAddr
}
return ""
}
func (x *NodeInfo) GetMaxRpcParamLen() uint32 {
if x != nil {
return x.MaxRpcParamLen
}
return 0
}
func (x *NodeInfo) GetPrivate() bool {
if x != nil {
return x.Private
}
return false
}
func (x *NodeInfo) GetPublicServiceList() []string {
if x != nil {
return x.PublicServiceList
}
return nil
}
// Client->Master
type ServiceDiscoverReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *ServiceDiscoverReq) Reset() {
*x = ServiceDiscoverReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ServiceDiscoverReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServiceDiscoverReq) ProtoMessage() {}
func (x *ServiceDiscoverReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServiceDiscoverReq.ProtoReflect.Descriptor instead.
func (*ServiceDiscoverReq) Descriptor() ([]byte, []int) {
return file_test_rpc_dynamicdiscover_proto_rawDescGZIP(), []int{1}
}
func (x *ServiceDiscoverReq) GetNodeInfo() *NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Master->Client
type SubscribeDiscoverNotify struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
MasterNodeId int32 `protobuf:"varint,1,opt,name=MasterNodeId,proto3" json:"MasterNodeId,omitempty"`
IsFull bool `protobuf:"varint,2,opt,name=IsFull,proto3" json:"IsFull,omitempty"`
DelNodeId int32 `protobuf:"varint,3,opt,name=DelNodeId,proto3" json:"DelNodeId,omitempty"`
NodeInfo []*NodeInfo `protobuf:"bytes,4,rep,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *SubscribeDiscoverNotify) Reset() {
*x = SubscribeDiscoverNotify{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SubscribeDiscoverNotify) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SubscribeDiscoverNotify) ProtoMessage() {}
func (x *SubscribeDiscoverNotify) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SubscribeDiscoverNotify.ProtoReflect.Descriptor instead.
func (*SubscribeDiscoverNotify) Descriptor() ([]byte, []int) {
return file_test_rpc_dynamicdiscover_proto_rawDescGZIP(), []int{2}
}
func (x *SubscribeDiscoverNotify) GetMasterNodeId() int32 {
if x != nil {
return x.MasterNodeId
}
return 0
}
func (x *SubscribeDiscoverNotify) GetIsFull() bool {
if x != nil {
return x.IsFull
}
return false
}
func (x *SubscribeDiscoverNotify) GetDelNodeId() int32 {
if x != nil {
return x.DelNodeId
}
return 0
}
func (x *SubscribeDiscoverNotify) GetNodeInfo() []*NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Master->Client
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *Empty) Reset() {
*x = Empty{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_dynamicdiscover_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
func (*Empty) Descriptor() ([]byte, []int) {
return file_test_rpc_dynamicdiscover_proto_rawDescGZIP(), []int{3}
}
var File_test_rpc_dynamicdiscover_proto protoreflect.FileDescriptor
var file_test_rpc_dynamicdiscover_proto_rawDesc = []byte{
0x0a, 0x1e, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d,
0x69, 0x63, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x03, 0x72, 0x70, 0x63, 0x22, 0xce, 0x01, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e,
0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x05, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x4e, 0x6f,
0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x4e, 0x6f,
0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e,
0x41, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x4c, 0x69, 0x73, 0x74,
0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x4d, 0x61, 0x78, 0x52, 0x70, 0x63,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e,
0x4d, 0x61, 0x78, 0x52, 0x70, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x65, 0x6e, 0x12, 0x18,
0x0a, 0x07, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
0x07, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x18, 0x06, 0x20,
0x03, 0x28, 0x09, 0x52, 0x11, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x3f, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08,
0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e,
0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x9e, 0x01, 0x0a, 0x17, 0x53, 0x75, 0x62, 0x73,
0x63, 0x72, 0x69, 0x62, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x6f, 0x74,
0x69, 0x66, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64,
0x65, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x4d, 0x61, 0x73, 0x74, 0x65,
0x72, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x46, 0x75, 0x6c,
0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x49, 0x73, 0x46, 0x75, 0x6c, 0x6c, 0x12,
0x1c, 0x0a, 0x09, 0x44, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01,
0x28, 0x05, 0x52, 0x09, 0x44, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a,
0x08, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08,
0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74,
0x79, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x3b, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_test_rpc_dynamicdiscover_proto_rawDescOnce sync.Once
file_test_rpc_dynamicdiscover_proto_rawDescData = file_test_rpc_dynamicdiscover_proto_rawDesc
)
func file_test_rpc_dynamicdiscover_proto_rawDescGZIP() []byte {
file_test_rpc_dynamicdiscover_proto_rawDescOnce.Do(func() {
file_test_rpc_dynamicdiscover_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_rpc_dynamicdiscover_proto_rawDescData)
})
return file_test_rpc_dynamicdiscover_proto_rawDescData
}
var file_test_rpc_dynamicdiscover_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_test_rpc_dynamicdiscover_proto_goTypes = []interface{}{
(*NodeInfo)(nil), // 0: rpc.NodeInfo
(*ServiceDiscoverReq)(nil), // 1: rpc.ServiceDiscoverReq
(*SubscribeDiscoverNotify)(nil), // 2: rpc.SubscribeDiscoverNotify
(*Empty)(nil), // 3: rpc.Empty
}
var file_test_rpc_dynamicdiscover_proto_depIdxs = []int32{
0, // 0: rpc.ServiceDiscoverReq.nodeInfo:type_name -> rpc.NodeInfo
0, // 1: rpc.SubscribeDiscoverNotify.nodeInfo:type_name -> rpc.NodeInfo
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_test_rpc_dynamicdiscover_proto_init() }
func file_test_rpc_dynamicdiscover_proto_init() {
if File_test_rpc_dynamicdiscover_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_test_rpc_dynamicdiscover_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_test_rpc_dynamicdiscover_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ServiceDiscoverReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_test_rpc_dynamicdiscover_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SubscribeDiscoverNotify); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_test_rpc_dynamicdiscover_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_test_rpc_dynamicdiscover_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_test_rpc_dynamicdiscover_proto_goTypes,
DependencyIndexes: file_test_rpc_dynamicdiscover_proto_depIdxs,
MessageInfos: file_test_rpc_dynamicdiscover_proto_msgTypes,
}.Build()
File_test_rpc_dynamicdiscover_proto = out.File
file_test_rpc_dynamicdiscover_proto_rawDesc = nil
file_test_rpc_dynamicdiscover_proto_goTypes = nil
file_test_rpc_dynamicdiscover_proto_depIdxs = nil
}

View File

@@ -1,29 +0,0 @@
syntax = "proto3";
package rpc;
option go_package = ".;rpc";
message NodeInfo{
int32 NodeId = 1;
string NodeName = 2;
string ListenAddr = 3;
uint32 MaxRpcParamLen = 4;
bool Private = 5;
repeated string PublicServiceList = 6;
}
//Client->Master
message ServiceDiscoverReq{
NodeInfo nodeInfo = 1;
}
//Master->Client
message SubscribeDiscoverNotify{
int32 MasterNodeId = 1;
bool IsFull = 2;
int32 DelNodeId = 3;
repeated NodeInfo nodeInfo = 4;
}
//Master->Client
message Empty{
}

View File

@@ -1,7 +1,7 @@
package rpc
import (
"github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/v2/util/sync"
jsoniter "github.com/json-iterator/go"
"reflect"
)

View File

@@ -2,48 +2,48 @@ package rpc
import (
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"reflect"
"strings"
"sync/atomic"
"time"
)
//client for the local node
// LClient is the client for the local node
type LClient struct {
selfClient *Client
}
func (rc *LClient) Lock(){
func (lc *LClient) Lock() {
}
func (rc *LClient) Unlock(){
func (lc *LClient) Unlock() {
}
func (lc *LClient) Run(){
func (lc *LClient) Run() {
}
func (lc *LClient) OnClose(){
func (lc *LClient) OnClose() {
}
func (lc *LClient) IsConnected() bool {
return true
}
func (lc *LClient) SetConn(conn *network.TCPConn){
func (lc *LClient) SetConn(conn *network.NetConn) {
}
func (lc *LClient) Close(waitDone bool){
func (lc *LClient) Close(waitDone bool) {
}
func (lc *LClient) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
func (lc *LClient) Go(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//check whether the call targets the same service
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
sErr := errors.New("Call serviceMethod " + serviceMethod + " is error!")
log.Error("call rpc fail",log.String("error",sErr.Error()))
log.Error("call rpc fail", log.String("error", sErr.Error()))
call := MakeCall()
call.DoError(sErr)
@@ -53,7 +53,7 @@ func (lc *LClient) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool,
serviceName := serviceMethod[:findIndex]
if serviceName == rpcHandler.GetName() { //calling its own service
//invoke this service's own rpcHandler
err := pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args, requestHandlerNull,reply)
err := pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient, serviceName, serviceMethod, args, requestHandlerNull, reply)
call := MakeCall()
if err != nil {
@@ -66,13 +66,12 @@ func (lc *LClient) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool,
}
//dispatch to another rpcHandler on this node
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout,nil, lc.selfClient, noReply, serviceName, 0, serviceMethod, args, reply, nil)
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout, nil, lc.selfClient, noReply, serviceName, 0, serviceMethod, args, reply, nil)
}
func (rc *LClient) RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceName string, rawArgs []byte, reply interface{}) *Call {
func (lc *LClient) RawGo(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceName string, rawArgs []byte, reply interface{}) *Call {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//the service is calling itself
if serviceName == rpcHandler.GetName() {
call := MakeCall()
@@ -80,7 +79,7 @@ func (rc *LClient) RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor
call.Reply = reply
call.TimeOut = timeout
err := pLocalRpcServer.myselfRpcHandlerGo(rc.selfClient,serviceName, serviceName, rawArgs, requestHandlerNull,nil)
err := pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient, serviceName, serviceName, rawArgs, requestHandlerNull, nil)
call.Err = err
call.done <- call
@@ -88,11 +87,10 @@ func (rc *LClient) RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor
}
//dispatch to another rpcHandler on this node
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout,processor,rc.selfClient, true, serviceName, rpcMethodId, serviceName, nil, nil, rawArgs)
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout, processor, lc.selfClient, true, serviceName, rpcMethodId, serviceName, nil, nil, rawArgs)
}
func (lc *LClient) AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, reply interface{},cancelable bool) (CancelRpc,error) {
func (lc *LClient) AsyncCall(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, reply interface{}) (CancelRpc, error) {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//check whether the call targets the same service
@@ -100,36 +98,35 @@ func (lc *LClient) AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, servi
if findIndex == -1 {
err := errors.New("Call serviceMethod " + serviceMethod + " is error!")
callback.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.Error("serviceMethod format is error",log.String("error",err.Error()))
return emptyCancelRpc,nil
log.Error("serviceMethod format is error", log.String("error", err.Error()))
return emptyCancelRpc, nil
}
serviceName := serviceMethod[:findIndex]
//invoke this service's own rpcHandler
if serviceName == rpcHandler.GetName() { //calling its own service
return emptyCancelRpc,pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args,callback ,reply)
return emptyCancelRpc, pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient, serviceName, serviceMethod, args, callback, reply)
}
//dispatch to another rpcHandler on this node
calcelRpc,err := pLocalRpcServer.selfNodeRpcHandlerAsyncGo(timeout,lc.selfClient, rpcHandler, false, serviceName, serviceMethod, args, reply, callback,cancelable)
cancelRpc, err := pLocalRpcServer.selfNodeRpcHandlerAsyncGo(timeout, lc.selfClient, rpcHandler, false, serviceName, serviceMethod, args, reply, callback)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
return calcelRpc,nil
return cancelRpc, nil
}
func NewLClient(nodeId int) *Client{
func NewLClient(localNodeId string, callSet *CallSet) *Client {
client := &Client{}
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.nodeId = nodeId
client.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
client.callRpcTimeout = DefaultRpcTimeout
client.targetNodeId = localNodeId
lClient := &LClient{}
lClient.selfClient = client
client.IRealClient = lClient
client.InitPending()
go client.checkRpcCallTimeout()
client.CallSet = callSet
return client
}
func (lc *LClient) Bind(_ IServer) {
}

291
rpc/lserver.go Normal file
View File

@@ -0,0 +1,291 @@
package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/log"
"reflect"
"strings"
"time"
)
type BaseServer struct {
localNodeId string
compressBytesLen int
rpcHandleFinder RpcHandleFinder
iServer IServer
}
func (server *BaseServer) initBaseServer(compressBytesLen int, rpcHandleFinder RpcHandleFinder) {
server.compressBytesLen = compressBytesLen
server.rpcHandleFinder = rpcHandleFinder
}
func (server *BaseServer) myselfRpcHandlerGo(client *Client, handlerName string, serviceMethod string, args interface{}, callBack reflect.Value, reply interface{}) error {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error("service method not config", log.String("serviceMethod", serviceMethod))
return err
}
return rpcHandler.CallMethod(client, serviceMethod, args, callBack, reply)
}
func (server *BaseServer) selfNodeRpcHandlerGo(timeout time.Duration, processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
pCall := MakeCall()
pCall.Seq = client.generateSeq()
pCall.TimeOut = timeout
pCall.ServiceMethod = serviceMethod
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error("service method not config", log.String("serviceMethod", serviceMethod), log.ErrorField("error", err))
pCall.Seq = 0
pCall.DoError(err)
return pCall
}
var iParam interface{}
if processor == nil {
_, processor = GetProcessorType(args)
}
if args != nil {
var err error
iParam, err = processor.Clone(args)
if err != nil {
sErr := errors.New("RpcHandler " + handlerName + "." + serviceMethod + " deep copy inParam is error:" + err.Error())
log.Error("deep copy inParam is failed", log.String("handlerName", handlerName), log.String("serviceMethod", serviceMethod))
pCall.Seq = 0
pCall.DoError(sErr)
return pCall
}
}
req := MakeRpcRequest(processor, 0, rpcMethodId, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
if rawArgs != nil {
var err error
req.inParam, err = rpcHandler.UnmarshalInParam(processor, serviceMethod, rpcMethodId, rawArgs)
if err != nil {
log.Error("unmarshalInParam is failed", log.String("serviceMethod", serviceMethod), log.Uint32("rpcMethodId", rpcMethodId), log.ErrorField("error", err))
pCall.Seq = 0
pCall.DoError(err)
ReleaseRpcRequest(req)
return pCall
}
}
if noReply == false {
client.AddPending(pCall)
callSeq := pCall.Seq
req.requestHandle = func(Returns interface{}, Err RpcError) {
if reply != nil && Returns != reply && Returns != nil {
byteReturns, err := req.rpcProcessor.Marshal(Returns)
if err != nil {
Err = ConvertError(err)
log.Error("returns data cannot be marshal", log.Uint64("seq", callSeq), log.ErrorField("error", err))
} else {
err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
Err = ConvertError(err)
log.Error("returns data cannot be Unmarshal", log.Uint64("seq", callSeq), log.ErrorField("error", err))
}
}
}
ReleaseRpcRequest(req)
v := client.RemovePending(callSeq)
if v == nil {
log.Error("rpcClient cannot find seq", log.Uint64("seq", callSeq))
return
}
if len(Err) == 0 {
v.Err = nil
v.DoOK()
} else {
log.Error(Err.Error())
v.DoError(Err)
}
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
log.Error(err.Error())
pCall.DoError(err)
ReleaseRpcRequest(req)
}
return pCall
}
func (server *BaseServer) selfNodeRpcHandlerAsyncGo(timeout time.Duration, client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value) (CancelRpc, error) {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error(err.Error())
return emptyCancelRpc, err
}
_, processor := GetProcessorType(args)
iParam, err := processor.Clone(args)
if err != nil {
errM := errors.New("RpcHandler " + handlerName + "." + serviceMethod + " deep copy inParam is error:" + err.Error())
log.Error(errM.Error())
return emptyCancelRpc, errM
}
req := MakeRpcRequest(processor, 0, 0, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
cancelRpc := emptyCancelRpc
var callSeq uint64
if noReply == false {
callSeq = client.generateSeq()
pCall := MakeCall()
pCall.Seq = callSeq
pCall.rpcHandler = callerRpcHandler
pCall.callback = &callback
pCall.Reply = reply
pCall.ServiceMethod = serviceMethod
pCall.TimeOut = timeout
client.AddPending(pCall)
rpcCancel := RpcCancel{CallSeq: callSeq, Cli: client}
cancelRpc = rpcCancel.CancelRpc
req.requestHandle = func(Returns interface{}, Err RpcError) {
v := client.RemovePending(callSeq)
if v == nil {
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
v.Err = nil
} else {
v.Err = Err
}
if Returns != nil {
v.Reply = Returns
}
v.rpcHandler.PushRpcResponse(v)
ReleaseRpcRequest(req)
}
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
if callSeq > 0 {
client.RemovePending(callSeq)
}
return emptyCancelRpc, err
}
return cancelRpc, nil
}
func (server *BaseServer) processRpcRequest(data []byte, connTag string, wrResponse writeResponse) error {
bCompress := (data[0] >> 7) > 0
processor := GetProcessor(data[0] & 0x7f)
if processor == nil {
return errors.New("cannot find processor")
}
//parse the head
var compressBuff []byte
byteData := data[1:]
if bCompress == true {
var unCompressErr error
compressBuff, unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr != nil {
return errors.New("uncompressBlock failed")
}
byteData = compressBuff
}
req := MakeRpcRequest(processor, 0, 0, "", false, nil)
err := processor.Unmarshal(byteData, req.RpcRequestData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
if err != nil {
if req.RpcRequestData.GetSeq() > 0 {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor, connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
}
ReleaseRpcRequest(req)
return err
}
//hand the request off to the handler
serviceMethod := strings.Split(req.RpcRequestData.GetServiceMethod(), ".")
if len(serviceMethod) < 2 {
rpcError := RpcError("rpc request req.ServiceMethod is invalid")
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor, connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
log.Error("rpc request req.ServiceMethod is invalid")
return nil
}
rpcHandler := server.rpcHandleFinder.FindRpcHandler(serviceMethod[0])
if rpcHandler == nil {
rpcError := RpcError(fmt.Sprintf("service method %s not config!", req.RpcRequestData.GetServiceMethod()))
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor, connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
log.Error("serviceMethod not config", log.String("serviceMethod", req.RpcRequestData.GetServiceMethod()))
ReleaseRpcRequest(req)
return nil
}
if req.RpcRequestData.IsNoReply() == false {
req.requestHandle = func(Returns interface{}, Err RpcError) {
wrResponse(processor, connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), Returns, Err)
ReleaseRpcRequest(req)
}
}
req.inParam, err = rpcHandler.UnmarshalInParam(req.rpcProcessor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetRpcMethodId(), req.RpcRequestData.GetInParam())
if err != nil {
rErr := "Call Rpc " + req.RpcRequestData.GetServiceMethod() + " Param error " + err.Error()
log.Error("call rpc param error", log.String("serviceMethod", req.RpcRequestData.GetServiceMethod()), log.ErrorField("error", err))
if req.requestHandle != nil {
req.requestHandle(nil, RpcError(rErr))
} else {
ReleaseRpcRequest(req)
}
return nil
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor, connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
}
return nil
}
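The wire format handled above is a single header byte followed by the marshaled request: bits 0-6 carry the processor type and bit 7 marks a compressed payload. A minimal sketch of packing and unpacking that header byte (compression itself is left out here; the framework delegates it to its compressor package):

package main

import (
	"errors"
	"fmt"
)

// encodeFrame builds the header byte used by processRpcRequest: bits 0-6 are
// the processor type, bit 7 flags a compressed payload. Compression is not
// performed in this sketch.
func encodeFrame(processorType uint8, payload []byte, compressed bool) []byte {
	head := processorType & 0x7f
	if compressed {
		head |= 1 << 7
	}
	return append([]byte{head}, payload...)
}

// decodeFrame reverses encodeFrame and reports whether the payload still has
// to be uncompressed before unmarshaling.
func decodeFrame(frame []byte) (processorType uint8, payload []byte, compressed bool, err error) {
	if len(frame) < 1 {
		return 0, nil, false, errors.New("empty rpc frame")
	}
	return frame[0] & 0x7f, frame[1:], frame[0]>>7 > 0, nil
}

func main() {
	frame := encodeFrame(1, []byte("marshaled request"), false)
	pt, body, compressed, _ := decodeFrame(frame)
	fmt.Println(pt, string(body), compressed)
}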

View File

@@ -1,8 +1,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v3.11.4
// source: test/rpc/messagequeue.proto
// protoc v4.24.0
// source: rpcproto/messagequeue.proto
package rpc
@@ -50,11 +50,11 @@ func (x SubscribeType) String() string {
}
func (SubscribeType) Descriptor() protoreflect.EnumDescriptor {
return file_test_rpc_messagequeue_proto_enumTypes[0].Descriptor()
return file_rpcproto_messagequeue_proto_enumTypes[0].Descriptor()
}
func (SubscribeType) Type() protoreflect.EnumType {
return &file_test_rpc_messagequeue_proto_enumTypes[0]
return &file_rpcproto_messagequeue_proto_enumTypes[0]
}
func (x SubscribeType) Number() protoreflect.EnumNumber {
@@ -63,7 +63,7 @@ func (x SubscribeType) Number() protoreflect.EnumNumber {
// Deprecated: Use SubscribeType.Descriptor instead.
func (SubscribeType) EnumDescriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{0}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{0}
}
type SubscribeMethod int32
@@ -96,11 +96,11 @@ func (x SubscribeMethod) String() string {
}
func (SubscribeMethod) Descriptor() protoreflect.EnumDescriptor {
return file_test_rpc_messagequeue_proto_enumTypes[1].Descriptor()
return file_rpcproto_messagequeue_proto_enumTypes[1].Descriptor()
}
func (SubscribeMethod) Type() protoreflect.EnumType {
return &file_test_rpc_messagequeue_proto_enumTypes[1]
return &file_rpcproto_messagequeue_proto_enumTypes[1]
}
func (x SubscribeMethod) Number() protoreflect.EnumNumber {
@@ -109,7 +109,7 @@ func (x SubscribeMethod) Number() protoreflect.EnumNumber {
// Deprecated: Use SubscribeMethod.Descriptor instead.
func (SubscribeMethod) EnumDescriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{1}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{1}
}
type DBQueuePopReq struct {
@@ -127,7 +127,7 @@ type DBQueuePopReq struct {
func (x *DBQueuePopReq) Reset() {
*x = DBQueuePopReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[0]
mi := &file_rpcproto_messagequeue_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -140,7 +140,7 @@ func (x *DBQueuePopReq) String() string {
func (*DBQueuePopReq) ProtoMessage() {}
func (x *DBQueuePopReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[0]
mi := &file_rpcproto_messagequeue_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -153,7 +153,7 @@ func (x *DBQueuePopReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePopReq.ProtoReflect.Descriptor instead.
func (*DBQueuePopReq) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{0}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{0}
}
func (x *DBQueuePopReq) GetCustomerId() string {
@@ -203,7 +203,7 @@ type DBQueuePopRes struct {
func (x *DBQueuePopRes) Reset() {
*x = DBQueuePopRes{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[1]
mi := &file_rpcproto_messagequeue_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -216,7 +216,7 @@ func (x *DBQueuePopRes) String() string {
func (*DBQueuePopRes) ProtoMessage() {}
func (x *DBQueuePopRes) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[1]
mi := &file_rpcproto_messagequeue_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -229,7 +229,7 @@ func (x *DBQueuePopRes) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePopRes.ProtoReflect.Descriptor instead.
func (*DBQueuePopRes) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{1}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{1}
}
func (x *DBQueuePopRes) GetQueueName() string {
@@ -255,7 +255,7 @@ type DBQueueSubscribeReq struct {
SubType SubscribeType `protobuf:"varint,1,opt,name=SubType,proto3,enum=SubscribeType" json:"SubType,omitempty"` //subscription type
Method SubscribeMethod `protobuf:"varint,2,opt,name=Method,proto3,enum=SubscribeMethod" json:"Method,omitempty"` //subscription method
CustomerId string `protobuf:"bytes,3,opt,name=CustomerId,proto3" json:"CustomerId,omitempty"` //consumer Id
FromNodeId int32 `protobuf:"varint,4,opt,name=FromNodeId,proto3" json:"FromNodeId,omitempty"`
FromNodeId string `protobuf:"bytes,4,opt,name=FromNodeId,proto3" json:"FromNodeId,omitempty"`
RpcMethod string `protobuf:"bytes,5,opt,name=RpcMethod,proto3" json:"RpcMethod,omitempty"`
TopicName string `protobuf:"bytes,6,opt,name=TopicName,proto3" json:"TopicName,omitempty"` //topic name
StartIndex uint64 `protobuf:"varint,7,opt,name=StartIndex,proto3" json:"StartIndex,omitempty"` //start position; the first 4 bytes are a timestamp in seconds, followed by a sequence number. If 0 is passed, the service rewrites it to (current time in seconds, high 4 bytes) | (0000, low 4 bytes)
@@ -265,7 +265,7 @@ type DBQueueSubscribeReq struct {
func (x *DBQueueSubscribeReq) Reset() {
*x = DBQueueSubscribeReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[2]
mi := &file_rpcproto_messagequeue_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -278,7 +278,7 @@ func (x *DBQueueSubscribeReq) String() string {
func (*DBQueueSubscribeReq) ProtoMessage() {}
func (x *DBQueueSubscribeReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[2]
mi := &file_rpcproto_messagequeue_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -291,7 +291,7 @@ func (x *DBQueueSubscribeReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueueSubscribeReq.ProtoReflect.Descriptor instead.
func (*DBQueueSubscribeReq) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{2}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{2}
}
func (x *DBQueueSubscribeReq) GetSubType() SubscribeType {
@@ -315,11 +315,11 @@ func (x *DBQueueSubscribeReq) GetCustomerId() string {
return ""
}
func (x *DBQueueSubscribeReq) GetFromNodeId() int32 {
func (x *DBQueueSubscribeReq) GetFromNodeId() string {
if x != nil {
return x.FromNodeId
}
return 0
return ""
}
func (x *DBQueueSubscribeReq) GetRpcMethod() string {
@@ -359,7 +359,7 @@ type DBQueueSubscribeRes struct {
func (x *DBQueueSubscribeRes) Reset() {
*x = DBQueueSubscribeRes{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[3]
mi := &file_rpcproto_messagequeue_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -372,7 +372,7 @@ func (x *DBQueueSubscribeRes) String() string {
func (*DBQueueSubscribeRes) ProtoMessage() {}
func (x *DBQueueSubscribeRes) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[3]
mi := &file_rpcproto_messagequeue_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -385,7 +385,7 @@ func (x *DBQueueSubscribeRes) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueueSubscribeRes.ProtoReflect.Descriptor instead.
func (*DBQueueSubscribeRes) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{3}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{3}
}
type DBQueuePublishReq struct {
@@ -400,7 +400,7 @@ type DBQueuePublishReq struct {
func (x *DBQueuePublishReq) Reset() {
*x = DBQueuePublishReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[4]
mi := &file_rpcproto_messagequeue_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -413,7 +413,7 @@ func (x *DBQueuePublishReq) String() string {
func (*DBQueuePublishReq) ProtoMessage() {}
func (x *DBQueuePublishReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[4]
mi := &file_rpcproto_messagequeue_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -426,7 +426,7 @@ func (x *DBQueuePublishReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePublishReq.ProtoReflect.Descriptor instead.
func (*DBQueuePublishReq) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{4}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{4}
}
func (x *DBQueuePublishReq) GetTopicName() string {
@@ -452,7 +452,7 @@ type DBQueuePublishRes struct {
func (x *DBQueuePublishRes) Reset() {
*x = DBQueuePublishRes{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[5]
mi := &file_rpcproto_messagequeue_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -465,7 +465,7 @@ func (x *DBQueuePublishRes) String() string {
func (*DBQueuePublishRes) ProtoMessage() {}
func (x *DBQueuePublishRes) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[5]
mi := &file_rpcproto_messagequeue_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -478,13 +478,13 @@ func (x *DBQueuePublishRes) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePublishRes.ProtoReflect.Descriptor instead.
func (*DBQueuePublishRes) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{5}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{5}
}
var File_test_rpc_messagequeue_proto protoreflect.FileDescriptor
var File_rpcproto_messagequeue_proto protoreflect.FileDescriptor
var file_test_rpc_messagequeue_proto_rawDesc = []byte{
0x0a, 0x1b, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
var file_rpcproto_messagequeue_proto_rawDesc = []byte{
0x0a, 0x1b, 0x72, 0x70, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x01,
0x0a, 0x0d, 0x44, 0x42, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12,
0x1e, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x18, 0x01, 0x20,
@@ -510,7 +510,7 @@ var file_test_rpc_messagequeue_proto_rawDesc = []byte{
0x6f, 0x64, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x75,
0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x72,
0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x70,
0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x52,
0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x6f, 0x70, 0x69,
@@ -539,20 +539,20 @@ var file_test_rpc_messagequeue_proto_rawDesc = []byte{
}
var (
file_test_rpc_messagequeue_proto_rawDescOnce sync.Once
file_test_rpc_messagequeue_proto_rawDescData = file_test_rpc_messagequeue_proto_rawDesc
file_rpcproto_messagequeue_proto_rawDescOnce sync.Once
file_rpcproto_messagequeue_proto_rawDescData = file_rpcproto_messagequeue_proto_rawDesc
)
func file_test_rpc_messagequeue_proto_rawDescGZIP() []byte {
file_test_rpc_messagequeue_proto_rawDescOnce.Do(func() {
file_test_rpc_messagequeue_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_rpc_messagequeue_proto_rawDescData)
func file_rpcproto_messagequeue_proto_rawDescGZIP() []byte {
file_rpcproto_messagequeue_proto_rawDescOnce.Do(func() {
file_rpcproto_messagequeue_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpcproto_messagequeue_proto_rawDescData)
})
return file_test_rpc_messagequeue_proto_rawDescData
return file_rpcproto_messagequeue_proto_rawDescData
}
var file_test_rpc_messagequeue_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_test_rpc_messagequeue_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_test_rpc_messagequeue_proto_goTypes = []interface{}{
var file_rpcproto_messagequeue_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_rpcproto_messagequeue_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_rpcproto_messagequeue_proto_goTypes = []interface{}{
(SubscribeType)(0), // 0: SubscribeType
(SubscribeMethod)(0), // 1: SubscribeMethod
(*DBQueuePopReq)(nil), // 2: DBQueuePopReq
@@ -562,7 +562,7 @@ var file_test_rpc_messagequeue_proto_goTypes = []interface{}{
(*DBQueuePublishReq)(nil), // 6: DBQueuePublishReq
(*DBQueuePublishRes)(nil), // 7: DBQueuePublishRes
}
var file_test_rpc_messagequeue_proto_depIdxs = []int32{
var file_rpcproto_messagequeue_proto_depIdxs = []int32{
0, // 0: DBQueueSubscribeReq.SubType:type_name -> SubscribeType
1, // 1: DBQueueSubscribeReq.Method:type_name -> SubscribeMethod
2, // [2:2] is the sub-list for method output_type
@@ -572,13 +572,13 @@ var file_test_rpc_messagequeue_proto_depIdxs = []int32{
0, // [0:2] is the sub-list for field type_name
}
func init() { file_test_rpc_messagequeue_proto_init() }
func file_test_rpc_messagequeue_proto_init() {
if File_test_rpc_messagequeue_proto != nil {
func init() { file_rpcproto_messagequeue_proto_init() }
func file_rpcproto_messagequeue_proto_init() {
if File_rpcproto_messagequeue_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_test_rpc_messagequeue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePopReq); i {
case 0:
return &v.state
@@ -590,7 +590,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePopRes); i {
case 0:
return &v.state
@@ -602,7 +602,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueueSubscribeReq); i {
case 0:
return &v.state
@@ -614,7 +614,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueueSubscribeRes); i {
case 0:
return &v.state
@@ -626,7 +626,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePublishReq); i {
case 0:
return &v.state
@@ -638,7 +638,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePublishRes); i {
case 0:
return &v.state
@@ -655,19 +655,19 @@ func file_test_rpc_messagequeue_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_test_rpc_messagequeue_proto_rawDesc,
RawDescriptor: file_rpcproto_messagequeue_proto_rawDesc,
NumEnums: 2,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_test_rpc_messagequeue_proto_goTypes,
DependencyIndexes: file_test_rpc_messagequeue_proto_depIdxs,
EnumInfos: file_test_rpc_messagequeue_proto_enumTypes,
MessageInfos: file_test_rpc_messagequeue_proto_msgTypes,
GoTypes: file_rpcproto_messagequeue_proto_goTypes,
DependencyIndexes: file_rpcproto_messagequeue_proto_depIdxs,
EnumInfos: file_rpcproto_messagequeue_proto_enumTypes,
MessageInfos: file_rpcproto_messagequeue_proto_msgTypes,
}.Build()
File_test_rpc_messagequeue_proto = out.File
file_test_rpc_messagequeue_proto_rawDesc = nil
file_test_rpc_messagequeue_proto_goTypes = nil
file_test_rpc_messagequeue_proto_depIdxs = nil
File_rpcproto_messagequeue_proto = out.File
file_rpcproto_messagequeue_proto_rawDesc = nil
file_rpcproto_messagequeue_proto_goTypes = nil
file_rpcproto_messagequeue_proto_depIdxs = nil
}

View File

@@ -31,7 +31,7 @@ message DBQueueSubscribeReq {
SubscribeType SubType = 1; //subscription type
SubscribeMethod Method = 2; //subscription method
string CustomerId = 3; //consumer Id
int32 FromNodeId = 4;
string FromNodeId = 4;
string RpcMethod = 5;
string TopicName = 6; //topic name
uint64 StartIndex = 7; //start position; the first 4 bytes are a timestamp in seconds, followed by a sequence number. If 0 is passed, the service rewrites it to (current time in seconds, high 4 bytes) | (0000, low 4 bytes)
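The StartIndex comment describes a packed uint64. Assuming the "4 bytes" reading of the original comment is correct, the value carries the Unix time in seconds in the high 32 bits and a per-second sequence number in the low 32 bits; a small sketch of that packing under this assumption:

package main

import (
	"fmt"
	"time"
)

// makeStartIndex packs the timestamp (seconds) into the high 32 bits and a
// per-second sequence number into the low 32 bits.
func makeStartIndex(unixSeconds uint32, seq uint32) uint64 {
	return uint64(unixSeconds)<<32 | uint64(seq)
}

// splitStartIndex recovers both halves.
func splitStartIndex(v uint64) (unixSeconds uint32, seq uint32) {
	return uint32(v >> 32), uint32(v)
}

func main() {
	v := makeStartIndex(uint32(time.Now().Unix()), 0) // sequence 0 within the current second
	sec, seq := splitStartIndex(v)
	fmt.Println(v, sec, seq)
}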

91
rpc/natsclient.go Normal file
View File

@@ -0,0 +1,91 @@
package rpc
import (
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"github.com/nats-io/nats.go"
"reflect"
"time"
)
// NatsClient is the client for cross-node connections
type NatsClient struct {
localNodeId string
notifyEventFun NotifyEventFun
natsConn *nats.Conn
client *Client
}
func (nc *NatsClient) Start(natsConn *nats.Conn) error {
nc.natsConn = natsConn
_, err := nc.natsConn.QueueSubscribe("oc."+nc.localNodeId, "oc", nc.onSubscribe)
return err
}
func (nc *NatsClient) onSubscribe(msg *nats.Msg) {
//process the incoming message
nc.client.processRpcResponse(msg.Data)
}
func (nc *NatsClient) SetConn(conn *network.NetConn) {
}
func (nc *NatsClient) Close(waitDone bool) {
}
func (nc *NatsClient) Run() {
}
func (nc *NatsClient) OnClose() {
}
func (nc *NatsClient) Bind(server IServer) {
s := server.(*NatsServer)
nc.natsConn = s.natsConn
}
func (nc *NatsClient) Go(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
_, processor := GetProcessorType(args)
InParam, err := processor.Marshal(args)
if err != nil {
log.Error("Marshal is fail", log.ErrorField("error", err))
call := MakeCall()
call.DoError(err)
return call
}
return nc.client.rawGo(nodeId, nc, timeout, rpcHandler, processor, noReply, 0, serviceMethod, InParam, reply)
}
func (nc *NatsClient) RawGo(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
return nc.client.rawGo(nodeId, nc, timeout, rpcHandler, processor, noReply, rpcMethodId, serviceMethod, rawArgs, reply)
}
func (nc *NatsClient) AsyncCall(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) (CancelRpc, error) {
cancelRpc, err := nc.client.asyncCall(nodeId, nc, timeout, rpcHandler, serviceMethod, callback, args, replyParam)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(replyParam), reflect.ValueOf(err)})
}
return cancelRpc, nil
}
func (nc *NatsClient) WriteMsg(nodeId string, args ...[]byte) error {
buff := make([]byte, 0, 4096)
for _, ar := range args {
buff = append(buff, ar...)
}
var msg nats.Msg
msg.Subject = "os." + nodeId
msg.Data = buff
msg.Header = nats.Header{}
msg.Header.Set("fnode", nc.localNodeId)
return nc.natsConn.PublishMsg(&msg)
}
func (nc *NatsClient) IsConnected() bool {
return nc.natsConn != nil && nc.natsConn.Status() == nats.CONNECTED
}
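The subjects used above follow a simple scheme: every node consumes responses on "oc.<its own nodeId>" (queue group "oc") and sends requests to "os.<target nodeId>", identifying itself through the "fnode" header so the server knows where to publish the reply. A minimal sketch of that round trip with plain nats.go, outside the framework, using hypothetical node ids node1/node2 and a local NATS server:

package main

import (
	"fmt"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		panic(err)
	}
	defer nc.Close()

	// Server role of node2: consume requests and answer on oc.<sender>.
	nc.QueueSubscribe("os.node2", "os", func(msg *nats.Msg) {
		from := msg.Header.Get("fnode")
		nc.Publish("oc."+from, append([]byte("echo:"), msg.Data...))
	})

	// Client role of node1: receive responses addressed to it.
	nc.QueueSubscribe("oc.node1", "oc", func(msg *nats.Msg) {
		fmt.Println(string(msg.Data))
	})

	// node1 -> node2 request, tagging the sender in the fnode header.
	req := &nats.Msg{Subject: "os.node2", Data: []byte("ping"), Header: nats.Header{}}
	req.Header.Set("fnode", "node1")
	nc.PublishMsg(req)

	time.Sleep(200 * time.Millisecond)
}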

127
rpc/natsserver.go Normal file
View File

@@ -0,0 +1,127 @@
package rpc
import (
"github.com/duanhf2012/origin/v2/log"
"github.com/nats-io/nats.go"
"time"
)
type NatsServer struct {
BaseServer
natsUrl string
natsConn *nats.Conn
NoRandomize bool
nodeSubTopic string
compressBytesLen int
notifyEventFun NotifyEventFun
}
const reconnectWait = 3 * time.Second
func (ns *NatsServer) Start() error {
var err error
var options []nats.Option
options = append(options, nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
log.Error("nats is disconnected", log.String("connUrl", nc.ConnectedUrl()))
ns.notifyEventFun(&NatsConnEvent{IsConnect: false})
}))
options = append(options, nats.ConnectHandler(func(nc *nats.Conn) {
log.Info("nats is connected", log.String("connUrl", nc.ConnectedUrl()))
ns.notifyEventFun(&NatsConnEvent{IsConnect: true})
}))
options = append(options, nats.ReconnectHandler(func(nc *nats.Conn) {
ns.notifyEventFun(&NatsConnEvent{IsConnect: true})
log.Info("nats is reconnected", log.String("connUrl", nc.ConnectedUrl()))
}))
options = append(options, nats.MaxReconnects(-1))
options = append(options, nats.ReconnectWait(reconnectWait))
if ns.NoRandomize {
options = append(options, nats.DontRandomize())
}
ns.natsConn, err = nats.Connect(ns.natsUrl, options...)
if err != nil {
log.Error("Connect to nats fail", log.String("natsUrl", ns.natsUrl), log.ErrorField("err", err))
return err
}
//start subscribing
_, err = ns.natsConn.QueueSubscribe(ns.nodeSubTopic, "os", func(msg *nats.Msg) {
ns.processRpcRequest(msg.Data, msg.Header.Get("fnode"), ns.WriteResponse)
})
return err
}
func (ns *NatsServer) WriteResponse(processor IRpcProcessor, nodeId string, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError) {
var mReply []byte
var err error
if reply != nil {
mReply, err = processor.Marshal(reply)
if err != nil {
rpcError = ConvertError(err)
}
}
var rpcResponse RpcResponse
rpcResponse.RpcResponseData = processor.MakeRpcResponse(seq, rpcError, mReply)
bytes, err := processor.Marshal(rpcResponse.RpcResponseData)
defer processor.ReleaseRpcResponse(rpcResponse.RpcResponseData)
if err != nil {
log.Error("marshal RpcResponseData failed", log.String("serviceMethod", serviceMethod), log.ErrorField("error", err))
return
}
var compressBuff []byte
bCompress := uint8(0)
if ns.compressBytesLen > 0 && len(bytes) >= ns.compressBytesLen {
compressBuff, err = compressor.CompressBlock(bytes)
if err != nil {
log.Error("CompressBlock failed", log.String("serviceMethod", serviceMethod), log.ErrorField("error", err))
return
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1 << 7
}
}
sendData := make([]byte, 0, 4096)
byteTypeAndCompress := []byte{uint8(processor.GetProcessorType()) | bCompress}
sendData = append(sendData, byteTypeAndCompress...)
sendData = append(sendData, bytes...)
err = ns.natsConn.PublishMsg(&nats.Msg{Subject: "oc." + nodeId, Data: sendData})
if cap(compressBuff) > 0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
log.Error("WriteMsg error,Rpc return is fail", log.String("nodeId", nodeId), log.String("serviceMethod", serviceMethod), log.ErrorField("error", err))
}
}
func (ns *NatsServer) Stop() {
if ns.natsConn != nil {
ns.natsConn.Close()
}
}
func (ns *NatsServer) initServer(natsUrl string, noRandomize bool, localNodeId string, compressBytesLen int, rpcHandleFinder RpcHandleFinder, notifyEventFun NotifyEventFun) {
ns.natsUrl = natsUrl
ns.NoRandomize = noRandomize
ns.localNodeId = localNodeId
ns.compressBytesLen = compressBytesLen
ns.notifyEventFun = notifyEventFun
ns.initBaseServer(compressBytesLen, rpcHandleFinder)
ns.nodeSubTopic = "os." + localNodeId //服务器
}
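NatsServer.Start assembles its connection from nats.Option values: unlimited reconnect attempts, a fixed reconnect wait, optional DontRandomize, and connect/disconnect handlers that raise NatsConnEvent. A stripped-down sketch of the same connection options with plain nats.go, with the event plumbing reduced to log statements:

package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	opts := []nats.Option{
		nats.MaxReconnects(-1),              // retry forever, as NatsServer.Start does
		nats.ReconnectWait(3 * time.Second), // mirrors the reconnectWait constant
		nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
			log.Printf("nats disconnected from %s: %v", nc.ConnectedUrl(), err)
		}),
		nats.ReconnectHandler(func(nc *nats.Conn) {
			log.Printf("nats reconnected to %s", nc.ConnectedUrl())
		}),
	}

	nc, err := nats.Connect(nats.DefaultURL, opts...)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
}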

662
rpc/origindiscover.pb.go Normal file
View File

@@ -0,0 +1,662 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.24.0
// source: rpcproto/origindiscover.proto
package rpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NodeInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId string `protobuf:"bytes,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
ListenAddr string `protobuf:"bytes,2,opt,name=ListenAddr,proto3" json:"ListenAddr,omitempty"`
MaxRpcParamLen uint32 `protobuf:"varint,3,opt,name=MaxRpcParamLen,proto3" json:"MaxRpcParamLen,omitempty"`
Private bool `protobuf:"varint,4,opt,name=Private,proto3" json:"Private,omitempty"`
Retire bool `protobuf:"varint,5,opt,name=Retire,proto3" json:"Retire,omitempty"`
PublicServiceList []string `protobuf:"bytes,6,rep,name=PublicServiceList,proto3" json:"PublicServiceList,omitempty"`
}
func (x *NodeInfo) Reset() {
*x = NodeInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeInfo) ProtoMessage() {}
func (x *NodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
func (*NodeInfo) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{0}
}
func (x *NodeInfo) GetNodeId() string {
if x != nil {
return x.NodeId
}
return ""
}
func (x *NodeInfo) GetListenAddr() string {
if x != nil {
return x.ListenAddr
}
return ""
}
func (x *NodeInfo) GetMaxRpcParamLen() uint32 {
if x != nil {
return x.MaxRpcParamLen
}
return 0
}
func (x *NodeInfo) GetPrivate() bool {
if x != nil {
return x.Private
}
return false
}
func (x *NodeInfo) GetRetire() bool {
if x != nil {
return x.Retire
}
return false
}
func (x *NodeInfo) GetPublicServiceList() []string {
if x != nil {
return x.PublicServiceList
}
return nil
}
// Client->Master
type RegServiceDiscoverReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *RegServiceDiscoverReq) Reset() {
*x = RegServiceDiscoverReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RegServiceDiscoverReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegServiceDiscoverReq) ProtoMessage() {}
func (x *RegServiceDiscoverReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegServiceDiscoverReq.ProtoReflect.Descriptor instead.
func (*RegServiceDiscoverReq) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{1}
}
func (x *RegServiceDiscoverReq) GetNodeInfo() *NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Master->Client
type SubscribeDiscoverNotify struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
MasterNodeId string `protobuf:"bytes,1,opt,name=MasterNodeId,proto3" json:"MasterNodeId,omitempty"`
IsFull bool `protobuf:"varint,2,opt,name=IsFull,proto3" json:"IsFull,omitempty"`
DelNodeId string `protobuf:"bytes,3,opt,name=DelNodeId,proto3" json:"DelNodeId,omitempty"`
NodeInfo []*NodeInfo `protobuf:"bytes,4,rep,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *SubscribeDiscoverNotify) Reset() {
*x = SubscribeDiscoverNotify{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SubscribeDiscoverNotify) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SubscribeDiscoverNotify) ProtoMessage() {}
func (x *SubscribeDiscoverNotify) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SubscribeDiscoverNotify.ProtoReflect.Descriptor instead.
func (*SubscribeDiscoverNotify) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{2}
}
func (x *SubscribeDiscoverNotify) GetMasterNodeId() string {
if x != nil {
return x.MasterNodeId
}
return ""
}
func (x *SubscribeDiscoverNotify) GetIsFull() bool {
if x != nil {
return x.IsFull
}
return false
}
func (x *SubscribeDiscoverNotify) GetDelNodeId() string {
if x != nil {
return x.DelNodeId
}
return ""
}
func (x *SubscribeDiscoverNotify) GetNodeInfo() []*NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Client->Master
type NodeRetireReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *NodeRetireReq) Reset() {
*x = NodeRetireReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeRetireReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeRetireReq) ProtoMessage() {}
func (x *NodeRetireReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeRetireReq.ProtoReflect.Descriptor instead.
func (*NodeRetireReq) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{3}
}
func (x *NodeRetireReq) GetNodeInfo() *NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Master->Client
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *Empty) Reset() {
*x = Empty{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
func (*Empty) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{4}
}
// Client->Master
type Ping struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId string `protobuf:"bytes,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
}
func (x *Ping) Reset() {
*x = Ping{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Ping) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Ping) ProtoMessage() {}
func (x *Ping) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Ping.ProtoReflect.Descriptor instead.
func (*Ping) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{5}
}
func (x *Ping) GetNodeId() string {
if x != nil {
return x.NodeId
}
return ""
}
// Master->Client
type Pong struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
}
func (x *Pong) Reset() {
*x = Pong{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Pong) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Pong) ProtoMessage() {}
func (x *Pong) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Pong.ProtoReflect.Descriptor instead.
func (*Pong) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{6}
}
func (x *Pong) GetOk() bool {
if x != nil {
return x.Ok
}
return false
}
type UnRegServiceDiscoverReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId string `protobuf:"bytes,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
}
func (x *UnRegServiceDiscoverReq) Reset() {
*x = UnRegServiceDiscoverReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UnRegServiceDiscoverReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnRegServiceDiscoverReq) ProtoMessage() {}
func (x *UnRegServiceDiscoverReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnRegServiceDiscoverReq.ProtoReflect.Descriptor instead.
func (*UnRegServiceDiscoverReq) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{7}
}
func (x *UnRegServiceDiscoverReq) GetNodeId() string {
if x != nil {
return x.NodeId
}
return ""
}
var File_rpcproto_origindiscover_proto protoreflect.FileDescriptor
var file_rpcproto_origindiscover_proto_rawDesc = []byte{
0x0a, 0x1d, 0x72, 0x70, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x69, 0x67, 0x69,
0x6e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x03, 0x72, 0x70, 0x63, 0x22, 0xca, 0x01, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x4c,
0x69, 0x73, 0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x4d, 0x61, 0x78,
0x52, 0x70, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0e, 0x4d, 0x61, 0x78, 0x52, 0x70, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x65,
0x6e, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x52,
0x65, 0x74, 0x69, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x52, 0x65, 0x74,
0x69, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73,
0x74, 0x22, 0x42, 0x0a, 0x15, 0x52, 0x65, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72,
0x70, 0x63, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f, 0x64,
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x9e, 0x01, 0x0a, 0x17, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x79, 0x12, 0x22, 0x0a, 0x0c, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x49,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x4e,
0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x46, 0x75, 0x6c, 0x6c, 0x18,
0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x49, 0x73, 0x46, 0x75, 0x6c, 0x6c, 0x12, 0x1c, 0x0a,
0x09, 0x44, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x09, 0x44, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x08, 0x6e,
0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
0x72, 0x70, 0x63, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x3a, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65,
0x74, 0x69, 0x72, 0x65, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x49,
0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e,
0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e,
0x66, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1e, 0x0a, 0x04, 0x50,
0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x04, 0x50,
0x6f, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
0x02, 0x6f, 0x6b, 0x22, 0x31, 0x0a, 0x17, 0x55, 0x6e, 0x52, 0x65, 0x67, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x16,
0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x3b, 0x72, 0x70, 0x63, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_rpcproto_origindiscover_proto_rawDescOnce sync.Once
file_rpcproto_origindiscover_proto_rawDescData = file_rpcproto_origindiscover_proto_rawDesc
)
func file_rpcproto_origindiscover_proto_rawDescGZIP() []byte {
file_rpcproto_origindiscover_proto_rawDescOnce.Do(func() {
file_rpcproto_origindiscover_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpcproto_origindiscover_proto_rawDescData)
})
return file_rpcproto_origindiscover_proto_rawDescData
}
var file_rpcproto_origindiscover_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_rpcproto_origindiscover_proto_goTypes = []interface{}{
(*NodeInfo)(nil), // 0: rpc.NodeInfo
(*RegServiceDiscoverReq)(nil), // 1: rpc.RegServiceDiscoverReq
(*SubscribeDiscoverNotify)(nil), // 2: rpc.SubscribeDiscoverNotify
(*NodeRetireReq)(nil), // 3: rpc.NodeRetireReq
(*Empty)(nil), // 4: rpc.Empty
(*Ping)(nil), // 5: rpc.Ping
(*Pong)(nil), // 6: rpc.Pong
(*UnRegServiceDiscoverReq)(nil), // 7: rpc.UnRegServiceDiscoverReq
}
var file_rpcproto_origindiscover_proto_depIdxs = []int32{
0, // 0: rpc.RegServiceDiscoverReq.nodeInfo:type_name -> rpc.NodeInfo
0, // 1: rpc.SubscribeDiscoverNotify.nodeInfo:type_name -> rpc.NodeInfo
0, // 2: rpc.NodeRetireReq.nodeInfo:type_name -> rpc.NodeInfo
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_rpcproto_origindiscover_proto_init() }
func file_rpcproto_origindiscover_proto_init() {
if File_rpcproto_origindiscover_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_rpcproto_origindiscover_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegServiceDiscoverReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SubscribeDiscoverNotify); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeRetireReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Ping); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Pong); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UnRegServiceDiscoverReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_rpcproto_origindiscover_proto_rawDesc,
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_rpcproto_origindiscover_proto_goTypes,
DependencyIndexes: file_rpcproto_origindiscover_proto_depIdxs,
MessageInfos: file_rpcproto_origindiscover_proto_msgTypes,
}.Build()
File_rpcproto_origindiscover_proto = out.File
file_rpcproto_origindiscover_proto_rawDesc = nil
file_rpcproto_origindiscover_proto_goTypes = nil
file_rpcproto_origindiscover_proto_depIdxs = nil
}

49
rpc/origindiscover.proto Normal file
View File

@@ -0,0 +1,49 @@
syntax = "proto3";
package rpc;
option go_package = ".;rpc";
message NodeInfo{
string NodeId = 1;
string ListenAddr = 2;
uint32 MaxRpcParamLen = 3;
bool Private = 4;
bool Retire = 5;
repeated string PublicServiceList = 6;
}
//Client->Master
message RegServiceDiscoverReq{
NodeInfo nodeInfo = 1;
}
//Master->Client
message SubscribeDiscoverNotify{
string MasterNodeId = 1;
bool IsFull = 2;
string DelNodeId = 3;
repeated NodeInfo nodeInfo = 4;
}
//Client->Master
message NodeRetireReq{
NodeInfo nodeInfo = 1;
}
//Master->Client
message Empty{
}
//Client->Master
message Ping{
string NodeId = 1;
}
//Master->Client
message Pong{
bool ok = 1;
}
message UnRegServiceDiscoverReq{
string NodeId = 1;
}
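These messages make up the discovery handshake between a node and the master (register, full/partial notify, retire, ping/pong). A minimal sketch of building and marshaling a RegServiceDiscoverReq with the generated types; the import path assumes this module's rpc package, and the node values and service name are placeholders:

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/rpc"
	"google.golang.org/protobuf/proto"
)

func main() {
	req := &rpc.RegServiceDiscoverReq{
		NodeInfo: &rpc.NodeInfo{
			NodeId:            "node1",
			ListenAddr:        "127.0.0.1:9001",
			MaxRpcParamLen:    4096,
			PublicServiceList: []string{"TestService"},
		},
	}

	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("encoded bytes:", len(data))
}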

View File

@@ -1,23 +1,23 @@
package rpc
import (
"github.com/duanhf2012/origin/util/sync"
"google.golang.org/protobuf/proto"
"fmt"
"github.com/duanhf2012/origin/v2/util/sync"
"google.golang.org/protobuf/proto"
)
type PBProcessor struct {
}
var rpcPbResponseDataPool =sync.NewPool(make(chan interface{},10240), func()interface{}{
var rpcPbResponseDataPool = sync.NewPool(make(chan interface{}, 10240), func() interface{} {
return &PBRpcResponseData{}
})
var rpcPbRequestDataPool =sync.NewPool(make(chan interface{},10240), func()interface{}{
var rpcPbRequestDataPool = sync.NewPool(make(chan interface{}, 10240), func() interface{} {
return &PBRpcRequestData{}
})
func (slf *PBRpcRequestData) MakeRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) *PBRpcRequestData{
func (slf *PBRpcRequestData) MakeRequest(seq uint64, rpcMethodId uint32, serviceMethod string, noReply bool, inParam []byte) *PBRpcRequestData {
slf.Seq = seq
slf.RpcMethodId = rpcMethodId
slf.ServiceMethod = serviceMethod
@@ -27,8 +27,7 @@ func (slf *PBRpcRequestData) MakeRequest(seq uint64,rpcMethodId uint32,serviceMe
return slf
}
func (slf *PBRpcResponseData) MakeRespone(seq uint64,err RpcError,reply []byte) *PBRpcResponseData{
func (slf *PBRpcResponseData) MakeResponse(seq uint64, err RpcError, reply []byte) *PBRpcResponseData {
slf.Seq = seq
slf.Error = err.Error()
slf.Reply = reply
@@ -36,61 +35,61 @@ func (slf *PBRpcResponseData) MakeRespone(seq uint64,err RpcError,reply []byte)
return slf
}
func (slf *PBProcessor) Marshal(v interface{}) ([]byte, error){
func (slf *PBProcessor) Marshal(v interface{}) ([]byte, error) {
return proto.Marshal(v.(proto.Message))
}
func (slf *PBProcessor) Unmarshal(data []byte, msg interface{}) error{
protoMsg,ok := msg.(proto.Message)
func (slf *PBProcessor) Unmarshal(data []byte, msg interface{}) error {
protoMsg, ok := msg.(proto.Message)
if ok == false {
return fmt.Errorf("%+v is not of proto.Message type",msg)
return fmt.Errorf("%+v is not of proto.Message type", msg)
}
return proto.Unmarshal(data, protoMsg)
}
func (slf *PBProcessor) MakeRpcRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) IRpcRequestData{
func (slf *PBProcessor) MakeRpcRequest(seq uint64, rpcMethodId uint32, serviceMethod string, noReply bool, inParam []byte) IRpcRequestData {
pGogoPbRpcRequestData := rpcPbRequestDataPool.Get().(*PBRpcRequestData)
pGogoPbRpcRequestData.MakeRequest(seq,rpcMethodId,serviceMethod,noReply,inParam)
pGogoPbRpcRequestData.MakeRequest(seq, rpcMethodId, serviceMethod, noReply, inParam)
return pGogoPbRpcRequestData
}
func (slf *PBProcessor) MakeRpcResponse(seq uint64,err RpcError,reply []byte) IRpcResponseData {
func (slf *PBProcessor) MakeRpcResponse(seq uint64, err RpcError, reply []byte) IRpcResponseData {
pPBRpcResponseData := rpcPbResponseDataPool.Get().(*PBRpcResponseData)
pPBRpcResponseData.MakeRespone(seq,err,reply)
pPBRpcResponseData.MakeResponse(seq, err, reply)
return pPBRpcResponseData
}
func (slf *PBProcessor) ReleaseRpcRequest(rpcRequestData IRpcRequestData){
func (slf *PBProcessor) ReleaseRpcRequest(rpcRequestData IRpcRequestData) {
rpcPbRequestDataPool.Put(rpcRequestData)
}
func (slf *PBProcessor) ReleaseRpcResponse(rpcResponseData IRpcResponseData){
func (slf *PBProcessor) ReleaseRpcResponse(rpcResponseData IRpcResponseData) {
rpcPbResponseDataPool.Put(rpcResponseData)
}
func (slf *PBProcessor) IsParse(param interface{}) bool {
_,ok := param.(proto.Message)
_, ok := param.(proto.Message)
return ok
}
func (slf *PBProcessor) GetProcessorType() RpcProcessorType{
return RpcProcessorGoGoPB
func (slf *PBProcessor) GetProcessorType() RpcProcessorType {
return RpcProcessorPB
}
func (slf *PBProcessor) Clone(src interface{}) (interface{},error){
srcMsg,ok := src.(proto.Message)
func (slf *PBProcessor) Clone(src interface{}) (interface{}, error) {
srcMsg, ok := src.(proto.Message)
if ok == false {
return nil,fmt.Errorf("param is not of proto.message type")
return nil, fmt.Errorf("param is not of proto.message type")
}
return proto.Clone(srcMsg),nil
return proto.Clone(srcMsg), nil
}
func (slf *PBRpcRequestData) IsNoReply() bool{
func (slf *PBRpcRequestData) IsNoReply() bool {
return slf.GetNoReply()
}
func (slf *PBRpcResponseData) GetErr() *RpcError {
func (slf *PBRpcResponseData) GetErr() *RpcError {
if slf.GetError() == "" {
return nil
}
@@ -98,9 +97,3 @@ func (slf *PBRpcResponseData) GetErr() *RpcError {
err := RpcError(slf.GetError())
return &err
}
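The processor pools its request and response wrappers, so a typical send path borrows a wrapper with MakeRpcRequest, marshals it, and hands it back with ReleaseRpcRequest. A minimal sketch of that cycle with PBProcessor; the service method name and input bytes are placeholders, and the import path assumes this module's rpc package:

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/rpc"
)

func main() {
	p := &rpc.PBProcessor{}

	// Borrow a pooled request wrapper, fill it, and marshal it for the wire.
	req := p.MakeRpcRequest(1, 0, "TestService.RPC_Sum", false, []byte("raw input param"))
	data, err := p.Marshal(req)
	if err != nil {
		panic(err)
	}

	// Hand the wrapper back to its pool once the bytes are written out.
	p.ReleaseRpcRequest(req)

	fmt.Println("request bytes:", len(data))
}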

View File

@@ -1,24 +1,23 @@
package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"math"
"reflect"
"runtime"
"sync/atomic"
"time"
)
//client for cross-node connections
// RClient is the client for cross-node connections
type RClient struct {
compressBytesLen int
selfClient *Client
network.TCPClient
conn *network.TCPConn
TriggerRpcConnEvent
conn *network.NetConn
notifyEventFun NotifyEventFun
}
func (rc *RClient) IsConnected() bool {
@@ -28,7 +27,7 @@ func (rc *RClient) IsConnected() bool {
return rc.conn != nil && rc.conn.IsConnected() == true
}
func (rc *RClient) GetConn() *network.TCPConn{
func (rc *RClient) GetConn() *network.NetConn {
rc.Lock()
conn := rc.conn
rc.Unlock()
@@ -36,262 +35,89 @@ func (rc *RClient) GetConn() *network.TCPConn{
return conn
}
func (rc *RClient) SetConn(conn *network.TCPConn){
func (rc *RClient) SetConn(conn *network.NetConn) {
rc.Lock()
rc.conn = conn
rc.Unlock()
}
func (rc *RClient) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
func (rc *RClient) WriteMsg(nodeId string, args ...[]byte) error {
return rc.conn.WriteMsg(args...)
}
func (rc *RClient) Go(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
_, processor := GetProcessorType(args)
InParam, err := processor.Marshal(args)
if err != nil {
log.Error("Marshal is fail",log.ErrorAttr("error",err))
log.Error("Marshal is fail", log.ErrorField("error", err))
call := MakeCall()
call.DoError(err)
return call
}
return rc.RawGo(timeout,rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
return rc.selfClient.rawGo(nodeId, rc, timeout, rpcHandler, processor, noReply, 0, serviceMethod, InParam, reply)
}
func (rc *RClient) RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
call := MakeCall()
call.ServiceMethod = serviceMethod
call.Reply = reply
call.Seq = rc.selfClient.generateSeq()
call.TimeOut = timeout
request := MakeRpcRequest(processor, call.Seq, rpcMethodId, serviceMethod, noReply, rawArgs)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
call.Seq = 0
log.Error("marshal is fail",log.String("error",err.Error()))
call.DoError(err)
return call
}
conn := rc.GetConn()
if conn == nil || conn.IsConnected()==false {
call.Seq = 0
sErr := errors.New(serviceMethod + " was called failed,rpc client is disconnect")
log.Error("conn is disconnect",log.String("error",sErr.Error()))
call.DoError(sErr)
return call
}
var compressBuff[]byte
bCompress := uint8(0)
if rc.compressBytesLen > 0 && len(bytes) >= rc.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
call.Seq = 0
log.Error("compress fail",log.String("error",cErr.Error()))
call.DoError(cErr)
return call
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
if noReply == false {
rc.selfClient.AddPending(call)
}
err = conn.WriteMsg([]byte{uint8(processor.GetProcessorType())|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
rc.selfClient.RemovePending(call.Seq)
log.Error("WiteMsg is fail",log.ErrorAttr("error",err))
call.Seq = 0
call.DoError(err)
}
return call
func (rc *RClient) RawGo(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
return rc.selfClient.rawGo(nodeId, rc, timeout, rpcHandler, processor, noReply, rpcMethodId, serviceMethod, rawArgs, reply)
}
func (rc *RClient) AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
cancelRpc,err := rc.asyncCall(timeout,rpcHandler, serviceMethod, callback, args, replyParam,cancelable)
func (rc *RClient) AsyncCall(nodeId string, timeout time.Duration, rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) (CancelRpc, error) {
cancelRpc, err := rc.selfClient.asyncCall(nodeId, rc, timeout, rpcHandler, serviceMethod, callback, args, replyParam)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(replyParam), reflect.ValueOf(err)})
}
return cancelRpc,nil
}
func (rc *RClient) asyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
processorType, processor := GetProcessorType(args)
InParam, herr := processor.Marshal(args)
if herr != nil {
return emptyCancelRpc,herr
}
seq := rc.selfClient.generateSeq()
request := MakeRpcRequest(processor, seq, 0, serviceMethod, false, InParam)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
return emptyCancelRpc,err
}
conn := rc.GetConn()
if conn == nil || conn.IsConnected()==false {
return emptyCancelRpc,errors.New("Rpc server is disconnect,call " + serviceMethod)
}
var compressBuff[]byte
bCompress := uint8(0)
if rc.compressBytesLen>0 &&len(bytes) >= rc.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
return emptyCancelRpc,cErr
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
call := MakeCall()
call.Reply = replyParam
call.callback = &callback
call.rpcHandler = rpcHandler
call.ServiceMethod = serviceMethod
call.Seq = seq
call.TimeOut = timeout
rc.selfClient.AddPending(call)
err = conn.WriteMsg([]byte{uint8(processorType)|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
rc.selfClient.RemovePending(call.Seq)
ReleaseCall(call)
return emptyCancelRpc,err
}
if cancelable {
rpcCancel := RpcCancel{CallSeq:seq,Cli: rc.selfClient}
return rpcCancel.CancelRpc,nil
}
return emptyCancelRpc,nil
return cancelRpc, nil
}
func (rc *RClient) Run() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
}
}()
rc.TriggerRpcConnEvent(true, rc.selfClient.GetClientId(), rc.selfClient.GetNodeId())
var eventData RpcConnEvent
eventData.IsConnect = true
eventData.NodeId = rc.selfClient.GetTargetNodeId()
rc.notifyEventFun(&eventData)
for {
bytes, err := rc.conn.ReadMsg()
if err != nil {
log.Error("rclient read msg is failed",log.ErrorAttr("error",err))
log.Error("RClient read msg is failed", log.ErrorField("error", err))
return
}
bCompress := (bytes[0]>>7) > 0
processor := GetProcessor(bytes[0]&0x7f)
if processor == nil {
rc.conn.ReleaseReadMsg(bytes)
log.Error("cannot find process",log.Uint8("process type",bytes[0]&0x7f))
return
}
//1. parse the head
response := RpcResponse{}
response.RpcResponseData = processor.MakeRpcResponse(0, "", nil)
//decompress
byteData := bytes[1:]
var compressBuff []byte
if bCompress == true {
var unCompressErr error
compressBuff,unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr!= nil {
rc.conn.ReleaseReadMsg(bytes)
log.Error("uncompressBlock failed",log.ErrorAttr("error",unCompressErr))
return
}
byteData = compressBuff
}
err = processor.Unmarshal(byteData, response.RpcResponseData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
err = rc.selfClient.processRpcResponse(bytes)
rc.conn.ReleaseReadMsg(bytes)
if err != nil {
processor.ReleaseRpcResponse(response.RpcResponseData)
log.Error("rpcClient Unmarshal head error",log.ErrorAttr("error",err))
continue
return
}
v := rc.selfClient.RemovePending(response.RpcResponseData.GetSeq())
if v == nil {
log.Error("rpcClient cannot find seq",log.Uint64("seq",response.RpcResponseData.GetSeq()))
} else {
v.Err = nil
if len(response.RpcResponseData.GetReply()) > 0 {
err = processor.Unmarshal(response.RpcResponseData.GetReply(), v.Reply)
if err != nil {
log.Error("rpcClient Unmarshal body failed",log.ErrorAttr("error",err))
v.Err = err
}
}
if response.RpcResponseData.GetErr() != nil {
v.Err = response.RpcResponseData.GetErr()
}
if v.callback != nil && v.callback.IsValid() {
v.rpcHandler.PushRpcResponse(v)
} else {
v.done <- v
}
}
processor.ReleaseRpcResponse(response.RpcResponseData)
}
}
func (rc *RClient) OnClose() {
rc.TriggerRpcConnEvent(false, rc.selfClient.GetClientId(), rc.selfClient.GetNodeId())
var connEvent RpcConnEvent
connEvent.IsConnect = false
connEvent.NodeId = rc.selfClient.GetTargetNodeId()
rc.notifyEventFun(&connEvent)
}
func NewRClient(nodeId int, addr string, maxRpcParamLen uint32,compressBytesLen int,triggerRpcConnEvent TriggerRpcConnEvent) *Client{
func NewRClient(targetNodeId string, addr string, maxRpcParamLen uint32, compressBytesLen int, callSet *CallSet, notifyEventFun NotifyEventFun) *Client {
client := &Client{}
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.nodeId = nodeId
client.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
client.callRpcTimeout = DefaultRpcTimeout
c:= &RClient{}
c.compressBytesLen = compressBytesLen
client.targetNodeId = targetNodeId
client.compressBytesLen = compressBytesLen
c := &RClient{}
c.selfClient = client
c.Addr = addr
c.ConnectInterval = DefaultConnectInterval
c.PendingWriteNum = DefaultMaxPendingWriteNum
c.AutoReconnect = true
c.TriggerRpcConnEvent = triggerRpcConnEvent
c.notifyEventFun = notifyEventFun
c.ConnNum = DefaultRpcConnNum
c.LenMsgLen = DefaultRpcLenMsgLen
c.MinMsgLen = DefaultRpcMinMsgLen
@@ -306,15 +132,16 @@ func NewRClient(nodeId int, addr string, maxRpcParamLen uint32,compressBytesLen
c.MaxMsgLen = math.MaxUint32
}
client.IRealClient = c
client.InitPending()
go client.checkRpcCallTimeout()
client.CallSet = callSet
c.Start()
return client
}
func (rc *RClient) Close(waitDone bool) {
rc.TCPClient.Close(waitDone)
rc.selfClient.cleanPending()
}
func (rc *RClient) Bind(server IServer) {
}
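
The Go/RawGo/AsyncCall paths above all share one wire-framing detail: the first byte of every RPC message carries the processor type in its low 7 bits and a compression flag in bit 7 (1<<7), and the body is only block-compressed when it reaches compressBytesLen and the compressed form is actually smaller. A minimal, self-contained sketch of that header byte follows; the helper names are illustrative and not part of the repository.

package main

import "fmt"

const compressFlag = uint8(1 << 7) // bit 7 marks a compressed payload

// encodeHeader packs the processor type (low 7 bits) together with the compression
// flag, mirroring []byte{uint8(processor.GetProcessorType()) | bCompress} above.
func encodeHeader(processorType uint8, compressed bool) uint8 {
	h := processorType & 0x7f
	if compressed {
		h |= compressFlag
	}
	return h
}

// decodeHeader mirrors what RClient.Run and RpcAgent.Run do with the first byte.
func decodeHeader(h uint8) (processorType uint8, compressed bool) {
	return h & 0x7f, (h >> 7) > 0
}

func main() {
	h := encodeHeader(1, true) // e.g. a PB-encoded, compressed payload
	pt, ok := decodeHeader(h)
	fmt.Println(pt, ok) // 1 true
}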


@@ -1,7 +1,7 @@
package rpc
import (
"github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/v2/util/sync"
"reflect"
"time"
)

rpc/rpcevent.go Normal file

@@ -0,0 +1,23 @@
package rpc
import "github.com/duanhf2012/origin/v2/event"
type NotifyEventFun func (event event.IEvent)
// RpcConnEvent node connection event
type RpcConnEvent struct{
IsConnect bool
NodeId string
}
func (rc *RpcConnEvent) GetEventType() event.EventType{
return event.Sys_Event_Node_Conn_Event
}
type NatsConnEvent struct {
IsConnect bool
}
func (nc *NatsConnEvent) GetEventType() event.EventType{
return event.Sys_Event_Nats_Conn_Event
}
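
rpcevent.go replaces the old TriggerRpcConnEvent callback with events that are delivered to services, and the listener interfaces in rpchandler.go now take string node ids. A hypothetical listener type that would satisfy the new INodeConnListener is sketched below; how a service registers it is not shown in this diff.

package myservice // hypothetical package name

import "github.com/duanhf2012/origin/v2/rpc"

// NodeWatcher reacts to node connect/disconnect notifications.
type NodeWatcher struct{}

func (w *NodeWatcher) OnNodeConnected(nodeId string) {
	// e.g. resume routing requests to services on nodeId
}

func (w *NodeWatcher) OnNodeDisconnect(nodeId string) {
	// e.g. mark sessions routed through nodeId as offline
}

// Compile-time check against the interface declared in rpchandler.go.
var _ rpc.INodeConnListener = (*NodeWatcher)(nil)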


@@ -3,20 +3,22 @@ package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"reflect"
"runtime"
"strings"
"time"
"unicode"
"unicode/utf8"
"time"
)
const maxClusterNode int = 128
const maxClusterNode int = 32
type FuncRpcClient func(nodeId int, serviceMethod string, client []*Client) (error, int)
type FuncRpcServer func() *Server
type FuncRpcClient func(nodeId string, serviceMethod string, filterRetire bool, client []*Client) (error, []*Client)
type FuncRpcServer func() IServer
const NodeIdNull = ""
var nilError = reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())
@@ -46,7 +48,7 @@ type RpcMethodInfo struct {
rpcProcessorType RpcProcessorType
}
type RawRpcCallBack func(rawData []byte)
type RawRpcCallBack func(rawData []byte)
type IRpcHandlerChannel interface {
PushRpcResponse(call *Call) error
@@ -62,22 +64,30 @@ type RpcHandler struct {
funcRpcClient FuncRpcClient
funcRpcServer FuncRpcServer
pClientList []*Client
//pClientList []*Client
}
type TriggerRpcConnEvent func(bConnect bool, clientSeq uint32, nodeId int)
type INodeListener interface {
OnNodeConnected(nodeId int)
OnNodeDisconnect(nodeId int)
// NotifyEventToAllService type TriggerRpcConnEvent func(bConnect bool, clientSeq uint32, nodeId string)
type NotifyEventToAllService func(event event.IEvent)
type INodeConnListener interface {
OnNodeConnected(nodeId string)
OnNodeDisconnect(nodeId string)
}
type INatsConnListener interface {
OnNatsConnected()
OnNatsDisconnect()
}
type IDiscoveryServiceListener interface {
OnDiscoveryService(nodeId int, serviceName []string)
OnUnDiscoveryService(nodeId int, serviceName []string)
OnDiscoveryService(nodeId string, serviceName []string)
OnUnDiscoveryService(nodeId string, serviceName []string)
}
type CancelRpc func()
func emptyCancelRpc(){}
func emptyCancelRpc() {}
type IRpcHandler interface {
IRpcHandlerChannel
@@ -86,23 +96,22 @@ type IRpcHandler interface {
GetRpcHandler() IRpcHandler
HandlerRpcRequest(request *RpcRequest)
HandlerRpcResponseCB(call *Call)
CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error
CallMethod(client *Client, ServiceMethod string, param interface{}, callBack reflect.Value, reply interface{}) error
Call(serviceMethod string, args interface{}, reply interface{}) error
CallNode(nodeId int, serviceMethod string, args interface{}, reply interface{}) error
CallNode(nodeId string, serviceMethod string, args interface{}, reply interface{}) error
AsyncCall(serviceMethod string, args interface{}, callback interface{}) error
AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error
AsyncCallNode(nodeId string, serviceMethod string, args interface{}, callback interface{}) error
CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error
CallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, reply interface{}) error
AsyncCallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error)
AsyncCallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error)
CallWithTimeout(timeout time.Duration, serviceMethod string, args interface{}, reply interface{}) error
CallNodeWithTimeout(timeout time.Duration, nodeId string, serviceMethod string, args interface{}, reply interface{}) error
AsyncCallWithTimeout(timeout time.Duration, serviceMethod string, args interface{}, callback interface{}) (CancelRpc, error)
AsyncCallNodeWithTimeout(timeout time.Duration, nodeId string, serviceMethod string, args interface{}, callback interface{}) (CancelRpc, error)
Go(serviceMethod string, args interface{}) error
GoNode(nodeId int, serviceMethod string, args interface{}) error
RawGoNode(rpcProcessorType RpcProcessorType, nodeId int, rpcMethodId uint32, serviceName string, rawArgs []byte) error
GoNode(nodeId string, serviceMethod string, args interface{}) error
RawGoNode(rpcProcessorType RpcProcessorType, nodeId string, rpcMethodId uint32, serviceName string, rawArgs []byte) error
CastGo(serviceMethod string, args interface{}) error
IsSingleCoroutine() bool
UnmarshalInParam(rpcProcessor IRpcProcessor, serviceMethod string, rawRpcMethodId uint32, inParam []byte) (interface{}, error)
GetRpcServer() FuncRpcServer
}
@@ -127,7 +136,6 @@ func (handler *RpcHandler) InitRpcHandler(rpcHandler IRpcHandler, getClientFun F
handler.mapFunctions = map[string]RpcMethodInfo{}
handler.funcRpcClient = getClientFun
handler.funcRpcServer = getServerFun
handler.pClientList = make([]*Client, maxClusterNode)
handler.RegisterRpc(rpcHandler)
}
@@ -156,13 +164,6 @@ func (handler *RpcHandler) suitableMethods(method reflect.Method) error {
//extract the input parameter type
var rpcMethodInfo RpcMethodInfo
typ := method.Type
if typ.NumOut() != 1 {
return fmt.Errorf("%s The number of returned arguments must be 1", method.Name)
}
if typ.Out(0).String() != "error" {
return fmt.Errorf("%s The return parameter must be of type error", method.Name)
}
if typ.NumIn() < 2 || typ.NumIn() > 4 {
return fmt.Errorf("%s Unsupported parameter format", method.Name)
@@ -175,6 +176,18 @@ func (handler *RpcHandler) suitableMethods(method reflect.Method) error {
rpcMethodInfo.hasResponder = true
}
if rpcMethodInfo.hasResponder && typ.NumOut() > 0 {
return fmt.Errorf("%s should not have return parameters", method.Name)
}
if !rpcMethodInfo.hasResponder && typ.NumOut() != 1 {
return fmt.Errorf("%s The number of returned arguments must be 1", method.Name)
}
if !rpcMethodInfo.hasResponder && typ.Out(0).String() != "error" {
return fmt.Errorf("%s The return parameter must be of type error", method.Name)
}
for i := parIdx; i < typ.NumIn(); i++ {
if handler.isExportedOrBuiltinType(typ.In(i)) == false {
return fmt.Errorf("%s Unsupported parameter types", method.Name)
@@ -212,10 +225,7 @@ func (handler *RpcHandler) RegisterRpc(rpcHandler IRpcHandler) error {
func (handler *RpcHandler) HandlerRpcResponseCB(call *Call) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
}
}()
@@ -234,10 +244,7 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
rpcErr := RpcError("call error : core dumps")
if request.requestHandle != nil {
request.requestHandle(nil, rpcErr)
@@ -250,12 +257,12 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
if rawRpcId > 0 {
v, ok := handler.mapRawFunctions[rawRpcId]
if ok == false {
log.Error("RpcHandler cannot find request rpc id",log.Uint32("rawRpcId",rawRpcId))
log.Error("RpcHandler cannot find request rpc id", log.Uint32("rawRpcId", rawRpcId))
return
}
rawData,ok := request.inParam.([]byte)
rawData, ok := request.inParam.([]byte)
if ok == false {
log.Error("RpcHandler cannot convert",log.String("RpcHandlerName",handler.rpcHandler.GetName()),log.Uint32("rawRpcId",rawRpcId))
log.Error("RpcHandler cannot convert", log.String("RpcHandlerName", handler.rpcHandler.GetName()), log.Uint32("rawRpcId", rawRpcId))
return
}
@@ -266,8 +273,8 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
//ordinary rpc request
v, ok := handler.mapFunctions[request.RpcRequestData.GetServiceMethod()]
if ok == false {
err := "RpcHandler " + handler.rpcHandler.GetName() + "cannot find " + request.RpcRequestData.GetServiceMethod()
log.Error("HandlerRpcRequest cannot find serviceMethod",log.String("RpcHandlerName",handler.rpcHandler.GetName()),log.String("serviceMethod",request.RpcRequestData.GetServiceMethod()))
err := "RpcHandler " + handler.rpcHandler.GetName() + " cannot find " + request.RpcRequestData.GetServiceMethod()
log.Error("HandlerRpcRequest cannot find serviceMethod", log.String("RpcHandlerName", handler.rpcHandler.GetName()), log.String("serviceMethod", request.RpcRequestData.GetServiceMethod()))
if request.requestHandle != nil {
request.requestHandle(nil, RpcError(err))
}
@@ -298,29 +305,31 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
paramList = append(paramList, oParam) //output parameter
} else if request.requestHandle != nil && v.hasResponder == false { //the caller expects a return value, but the called function has no return parameter
rErr := "Call Rpc " + request.RpcRequestData.GetServiceMethod() + " without return parameter!"
log.Error("call serviceMethod without return parameter",log.String("serviceMethod",request.RpcRequestData.GetServiceMethod()))
log.Error("call serviceMethod without return parameter", log.String("serviceMethod", request.RpcRequestData.GetServiceMethod()))
request.requestHandle(nil, RpcError(rErr))
return
}
requestHanle := request.requestHandle
requestHandle := request.requestHandle
returnValues := v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()
if errInter != nil {
err = errInter.(error)
if len(returnValues) > 0 {
errInter := returnValues[0].Interface()
if errInter != nil {
err = errInter.(error)
}
}
if v.hasResponder == false && requestHanle != nil {
requestHanle(oParam.Interface(), ConvertError(err))
if v.hasResponder == false && requestHandle != nil {
requestHandle(oParam.Interface(), ConvertError(err))
}
}
func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error {
func (handler *RpcHandler) CallMethod(client *Client, ServiceMethod string, param interface{}, callBack reflect.Value, reply interface{}) error {
var err error
v, ok := handler.mapFunctions[ServiceMethod]
if ok == false {
err = errors.New("RpcHandler " + handler.rpcHandler.GetName() + " cannot find" + ServiceMethod)
log.Error("CallMethod cannot find serviceMethod",log.String("rpcHandlerName",handler.rpcHandler.GetName()),log.String("serviceMethod",ServiceMethod))
log.Error("CallMethod cannot find serviceMethod", log.String("rpcHandlerName", handler.rpcHandler.GetName()), log.String("serviceMethod", ServiceMethod))
return err
}
@@ -341,31 +350,31 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
//when there is a return value
if reply != nil {
//if this is a synchronous Call invocation
hander :=func(Returns interface{}, Err RpcError) {
hd := func(Returns interface{}, Err RpcError) {
rpcCall := client.RemovePending(callSeq)
if rpcCall == nil {
log.Error("cannot find call seq",log.Uint64("seq",callSeq))
log.Error("cannot find call seq", log.Uint64("seq", callSeq))
return
}
//parse the data
if len(Err)!=0 {
if len(Err) != 0 {
rpcCall.Err = Err
}else if Returns != nil {
} else if Returns != nil {
_, processor := GetProcessorType(Returns)
var bytes []byte
bytes,rpcCall.Err = processor.Marshal(Returns)
bytes, rpcCall.Err = processor.Marshal(Returns)
if rpcCall.Err == nil {
rpcCall.Err = processor.Unmarshal(bytes,reply)
rpcCall.Err = processor.Unmarshal(bytes, reply)
}
}
//if not found, the call has already timed out
rpcCall.Reply = reply
rpcCall.done<-rpcCall
rpcCall.done <- rpcCall
}
paramList = append(paramList, reflect.ValueOf(hander))
}else{//when there is no return value, this is an empty requestHandlerNull callback
paramList = append(paramList, reflect.ValueOf(hd))
} else { //when there is no return value, this is an empty requestHandlerNull callback
paramList = append(paramList, callBack)
}
paramList = append(paramList, reflect.ValueOf(param))
@@ -375,11 +384,11 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
//check whether the return value is an error; if so, invoke the callback
errInter := returnValues[0].Interface()
if errInter != nil && callBack!=requestHandlerNull{
if errInter != nil && callBack != requestHandlerNull {
err = errInter.(error)
callBack.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
}else{
} else {
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //receiver
paramList = append(paramList, reflect.ValueOf(param))
@@ -388,7 +397,7 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
//RPC function without a return-value parameter
if reply == nil {
paramList = append(paramList, reflect.New(v.outParamValue.Type().Elem()))
}else{
} else {
//RPC function with a return-value parameter
paramList = append(paramList, reflect.ValueOf(reply)) //output parameter
}
@@ -405,14 +414,14 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
valErr = reflect.ValueOf(err)
}
callBack.Call([]reflect.Value{reflect.ValueOf(reply),valErr })
callBack.Call([]reflect.Value{reflect.ValueOf(reply), valErr})
}
}
rpcCall := client.FindPending(callSeq)
if rpcCall!=nil {
err = rpcCall.Done().Err
if rpcCall.callback!= nil {
if rpcCall != nil {
err = rpcCall.Done().Err
if rpcCall.callback != nil {
valErr := nilError
if rpcCall.Err != nil {
valErr = reflect.ValueOf(rpcCall.Err)
@@ -426,26 +435,26 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
return err
}
func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int, serviceMethod string, args interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
if count == 0 {
func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId string, serviceMethod string, args interface{}) error {
pClientList := make([]*Client, 0, maxClusterNode)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod, false, pClientList)
if len(pClientList) == 0 {
if err != nil {
log.Error("call serviceMethod is failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
log.Error("call serviceMethod is failed", log.String("serviceMethod", serviceMethod), log.ErrorField("error", err))
} else {
log.Error("cannot find serviceMethod",log.String("serviceMethod",serviceMethod))
log.Error("cannot find serviceMethod", log.String("serviceMethod", serviceMethod))
}
return err
}
if count > 1 && bCast == false {
log.Error("cannot call serviceMethod more then 1 node",log.String("serviceMethod",serviceMethod))
if len(pClientList) > 1 && bCast == false {
log.Error("cannot call serviceMethod more then 1 node", log.String("serviceMethod", serviceMethod))
return errors.New("cannot call more then 1 node")
}
//2. rpcClient invocation
for i := 0; i < count; i++ {
pCall := pClientList[i].Go(DefaultRpcTimeout,handler.rpcHandler,true, serviceMethod, args, nil)
for i := 0; i < len(pClientList); i++ {
pCall := pClientList[i].Go(pClientList[i].GetTargetNodeId(), DefaultRpcTimeout, handler.rpcHandler, true, serviceMethod, args, nil)
if pCall.Err != nil {
err = pCall.Err
}
@@ -456,23 +465,23 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int
return err
}
func (handler *RpcHandler) callRpc(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
func (handler *RpcHandler) callRpc(timeout time.Duration, nodeId string, serviceMethod string, args interface{}, reply interface{}) error {
pClientList := make([]*Client, 0, maxClusterNode)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod, false, pClientList)
if err != nil {
log.Error("Call serviceMethod is failed",log.ErrorAttr("error",err))
log.Error("Call serviceMethod is failed", log.ErrorField("error", err))
return err
} else if count <= 0 {
} else if len(pClientList) <= 0 {
err = errors.New("Call serviceMethod is error:cannot find " + serviceMethod)
log.Error("cannot find serviceMethod",log.String("serviceMethod",serviceMethod))
log.Error("cannot find serviceMethod", log.String("serviceMethod", serviceMethod))
return err
} else if count > 1 {
log.Error("Cannot call more then 1 node!",log.String("serviceMethod",serviceMethod))
} else if len(pClientList) > 1 {
log.Error("Cannot call more then 1 node!", log.String("serviceMethod", serviceMethod))
return errors.New("cannot call more then 1 node")
}
pClient := pClientList[0]
pCall := pClient.Go(timeout,handler.rpcHandler,false, serviceMethod, args, reply)
pCall := pClient.Go(pClient.GetTargetNodeId(), timeout, handler.rpcHandler, false, serviceMethod, args, reply)
err = pCall.Done().Err
pClient.RemovePending(pCall.Seq)
@@ -480,132 +489,128 @@ func (handler *RpcHandler) callRpc(timeout time.Duration,nodeId int, serviceMeth
return err
}
func (handler *RpcHandler) asyncCallRpc(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error) {
func (handler *RpcHandler) asyncCallRpc(timeout time.Duration, nodeId string, serviceMethod string, args interface{}, callback interface{}) (CancelRpc, error) {
fVal := reflect.ValueOf(callback)
if fVal.Kind() != reflect.Func {
err := errors.New("call " + serviceMethod + " input callback param is error!")
log.Error("input callback param is error",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,err
log.Error("input callback param is error", log.String("serviceMethod", serviceMethod))
return emptyCancelRpc, err
}
if fVal.Type().NumIn() != 2 {
err := errors.New("call " + serviceMethod + " callback param function is error!")
log.Error("callback param function is error",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,err
log.Error("callback param function is error", log.String("serviceMethod", serviceMethod))
return emptyCancelRpc, err
}
if fVal.Type().In(0).Kind() != reflect.Ptr || fVal.Type().In(1).String() != "error" {
err := errors.New("call " + serviceMethod + " callback param function is error!")
log.Error("callback param function is error",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,err
log.Error("callback param function is error", log.String("serviceMethod", serviceMethod))
return emptyCancelRpc, err
}
reply := reflect.New(fVal.Type().In(0).Elem()).Interface()
var pClientList [2]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
if count == 0 || err != nil {
pClientList := make([]*Client, 0, 1)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod, false, pClientList[:])
if len(pClientList) == 0 || err != nil {
if err == nil {
if nodeId > 0 {
err = fmt.Errorf("cannot find %s from nodeId %d",serviceMethod,nodeId)
}else {
err = fmt.Errorf("No %s service found in the origin network",serviceMethod)
if nodeId != NodeIdNull {
err = fmt.Errorf("cannot find %s from nodeId %s", serviceMethod, nodeId)
} else {
err = fmt.Errorf("no %s service found in the origin network", serviceMethod)
}
}
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.Error("cannot find serviceMethod from node",log.String("serviceMethod",serviceMethod),log.Int("nodeId",nodeId))
return emptyCancelRpc,nil
log.Error("cannot find serviceMethod from node", log.String("serviceMethod", serviceMethod), log.String("nodeId", nodeId))
return emptyCancelRpc, nil
}
if count > 1 {
if len(pClientList) > 1 {
err := errors.New("cannot call more then 1 node")
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.Error("cannot call more then 1 node",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,nil
log.Error("cannot call more then 1 node", log.String("serviceMethod", serviceMethod))
return emptyCancelRpc, nil
}
//2. rpcClient invocation
//when calling a service on the local node
return pClientList[0].AsyncCall(timeout,handler.rpcHandler, serviceMethod, fVal, args, reply,false)
return pClientList[0].AsyncCall(pClientList[0].GetTargetNodeId(), timeout, handler.rpcHandler, serviceMethod, fVal, args, reply)
}
func (handler *RpcHandler) GetName() string {
return handler.rpcHandler.GetName()
}
func (handler *RpcHandler) IsSingleCoroutine() bool {
return handler.rpcHandler.IsSingleCoroutine()
func (handler *RpcHandler) CallWithTimeout(timeout time.Duration, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(timeout, NodeIdNull, serviceMethod, args, reply)
}
func (handler *RpcHandler) CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(timeout,0, serviceMethod, args, reply)
func (handler *RpcHandler) CallNodeWithTimeout(timeout time.Duration, nodeId string, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(timeout, nodeId, serviceMethod, args, reply)
}
func (handler *RpcHandler) CallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, reply interface{}) error{
return handler.callRpc(timeout,nodeId, serviceMethod, args, reply)
func (handler *RpcHandler) AsyncCallWithTimeout(timeout time.Duration, serviceMethod string, args interface{}, callback interface{}) (CancelRpc, error) {
return handler.asyncCallRpc(timeout, NodeIdNull, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error){
return handler.asyncCallRpc(timeout,0, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error){
return handler.asyncCallRpc(timeout,nodeId, serviceMethod, args, callback)
func (handler *RpcHandler) AsyncCallNodeWithTimeout(timeout time.Duration, nodeId string, serviceMethod string, args interface{}, callback interface{}) (CancelRpc, error) {
return handler.asyncCallRpc(timeout, nodeId, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCall(serviceMethod string, args interface{}, callback interface{}) error {
_,err := handler.asyncCallRpc(DefaultRpcTimeout,0, serviceMethod, args, callback)
_, err := handler.asyncCallRpc(DefaultRpcTimeout, NodeIdNull, serviceMethod, args, callback)
return err
}
func (handler *RpcHandler) Call(serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(DefaultRpcTimeout,0, serviceMethod, args, reply)
return handler.callRpc(DefaultRpcTimeout, NodeIdNull, serviceMethod, args, reply)
}
func (handler *RpcHandler) Go(serviceMethod string, args interface{}) error {
return handler.goRpc(nil, false, 0, serviceMethod, args)
return handler.goRpc(nil, false, NodeIdNull, serviceMethod, args)
}
func (handler *RpcHandler) AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error {
_,err:= handler.asyncCallRpc(DefaultRpcTimeout,nodeId, serviceMethod, args, callback)
func (handler *RpcHandler) AsyncCallNode(nodeId string, serviceMethod string, args interface{}, callback interface{}) error {
_, err := handler.asyncCallRpc(DefaultRpcTimeout, nodeId, serviceMethod, args, callback)
return err
}
func (handler *RpcHandler) CallNode(nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(DefaultRpcTimeout,nodeId, serviceMethod, args, reply)
func (handler *RpcHandler) CallNode(nodeId string, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(DefaultRpcTimeout, nodeId, serviceMethod, args, reply)
}
func (handler *RpcHandler) GoNode(nodeId int, serviceMethod string, args interface{}) error {
func (handler *RpcHandler) GoNode(nodeId string, serviceMethod string, args interface{}) error {
return handler.goRpc(nil, false, nodeId, serviceMethod, args)
}
func (handler *RpcHandler) CastGo(serviceMethod string, args interface{}) error {
return handler.goRpc(nil, true, 0, serviceMethod, args)
return handler.goRpc(nil, true, NodeIdNull, serviceMethod, args)
}
func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId int, rpcMethodId uint32, serviceName string, rawArgs []byte) error {
func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId string, rpcMethodId uint32, serviceName string, rawArgs []byte) error {
processor := GetProcessor(uint8(rpcProcessorType))
err, count := handler.funcRpcClient(nodeId, serviceName, handler.pClientList)
if count == 0 || err != nil {
log.Error("call serviceMethod is failed",log.ErrorAttr("error",err))
pClientList := make([]*Client, 0, 1)
err, pClientList := handler.funcRpcClient(nodeId, serviceName, false, pClientList)
if len(pClientList) == 0 || err != nil {
log.Error("call serviceMethod is failed", log.ErrorField("error", err))
return err
}
if count > 1 {
if len(pClientList) > 1 {
err := errors.New("cannot call more then 1 node")
log.Error("cannot call more then 1 node",log.String("serviceName",serviceName))
log.Error("cannot call more then 1 node", log.String("serviceName", serviceName))
return err
}
//2. rpcClient invocation
//when calling a service on the local node
for i := 0; i < count; i++ {
for i := 0; i < len(pClientList); i++ {
//cross-node call
pCall := handler.pClientList[i].RawGo(DefaultRpcTimeout,handler.rpcHandler,processor, true, rpcMethodId, serviceName, rawArgs, nil)
pCall := pClientList[i].RawGo(pClientList[i].GetTargetNodeId(), DefaultRpcTimeout, handler.rpcHandler, processor, true, rpcMethodId, serviceName, rawArgs, nil)
if pCall.Err != nil {
err = pCall.Err
}
handler.pClientList[i].RemovePending(pCall.Seq)
pClientList[i].RemovePending(pCall.Seq)
ReleaseCall(pCall)
}
@@ -618,7 +623,7 @@ func (handler *RpcHandler) RegRawRpc(rpcMethodId uint32, rawRpcCB RawRpcCallBack
func (handler *RpcHandler) UnmarshalInParam(rpcProcessor IRpcProcessor, serviceMethod string, rawRpcMethodId uint32, inParam []byte) (interface{}, error) {
if rawRpcMethodId > 0 {
return inParam,nil
return inParam, nil
}
v, ok := handler.mapFunctions[serviceMethod]
@@ -632,7 +637,6 @@ func (handler *RpcHandler) UnmarshalInParam(rpcProcessor IRpcProcessor, serviceM
return param, err
}
func (handler *RpcHandler) GetRpcServer() FuncRpcServer{
func (handler *RpcHandler) GetRpcServer() FuncRpcServer {
return handler.funcRpcServer
}

rpc/rpcnats.go Normal file

@@ -0,0 +1,39 @@
package rpc
import "sync/atomic"
type RpcNats struct {
NatsServer
NatsClient
}
func (rn *RpcNats) Start() error{
err := rn.NatsServer.Start()
if err != nil {
return err
}
return rn.NatsClient.Start(rn.NatsServer.natsConn)
}
func (rn *RpcNats) Init(natsUrl string, noRandomize bool, nodeId string,compressBytesLen int,rpcHandleFinder RpcHandleFinder,notifyEventFun NotifyEventFun){
rn.NatsClient.localNodeId = nodeId
rn.NatsServer.initServer(natsUrl,noRandomize, nodeId,compressBytesLen,rpcHandleFinder,notifyEventFun)
rn.NatsServer.iServer = rn
}
func (rn *RpcNats) NewNatsClient(targetNodeId string,localNodeId string,callSet *CallSet,notifyEventFun NotifyEventFun) *Client{
var client Client
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.targetNodeId = targetNodeId
natsClient := &rn.NatsClient
natsClient.localNodeId = localNodeId
natsClient.client = &client
natsClient.notifyEventFun = notifyEventFun
client.IRealClient = natsClient
client.CallSet = callSet
return &client
}


@@ -1,35 +1,49 @@
package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"math"
"net"
"reflect"
"strings"
"time"
"runtime"
)
const Default_ReadWriteDeadline = 15 * time.Second
type RpcProcessorType uint8
const (
RpcProcessorJson RpcProcessorType = 0
RpcProcessorGoGoPB RpcProcessorType = 1
RpcProcessorJson RpcProcessorType = 0
RpcProcessorPB RpcProcessorType = 1
)
var arrayProcessor = []IRpcProcessor{&JsonProcessor{}, &PBProcessor{}}
var arrayProcessorLen uint8 = 2
var LittleEndian bool
type Server struct {
functions map[interface{}]interface{}
rpcHandleFinder RpcHandleFinder
rpcServer *network.TCPServer
type IServer interface {
Start() error
Stop()
compressBytesLen int
selfNodeRpcHandlerGo(timeout time.Duration, processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call
myselfRpcHandlerGo(client *Client, handlerName string, serviceMethod string, args interface{}, callBack reflect.Value, reply interface{}) error
selfNodeRpcHandlerAsyncGo(timeout time.Duration, client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value) (CancelRpc, error)
}
type writeResponse func(processor IRpcProcessor, connTag string, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError)
type Server struct {
BaseServer
functions map[interface{}]interface{}
rpcServer *network.TCPServer
listenAddr string
maxRpcParamLen uint32
}
type RpcAgent struct {
@@ -60,24 +74,24 @@ func GetProcessor(processorType uint8) IRpcProcessor {
return arrayProcessor[processorType]
}
func (server *Server) Init(rpcHandleFinder RpcHandleFinder) {
server.rpcHandleFinder = rpcHandleFinder
func (server *Server) Init(listenAddr string, maxRpcParamLen uint32, compressBytesLen int, rpcHandleFinder RpcHandleFinder) {
server.initBaseServer(compressBytesLen, rpcHandleFinder)
server.listenAddr = listenAddr
server.maxRpcParamLen = maxRpcParamLen
server.rpcServer = &network.TCPServer{}
}
const Default_ReadWriteDeadline = 15*time.Second
func (server *Server) Start(listenAddr string, maxRpcParamLen uint32,compressBytesLen int) {
splitAddr := strings.Split(listenAddr, ":")
func (server *Server) Start() error {
splitAddr := strings.Split(server.listenAddr, ":")
if len(splitAddr) != 2 {
log.Fatal("listen addr is failed", log.String("listenAddr",listenAddr))
return fmt.Errorf("listen addr is failed,listenAddr:%s", server.listenAddr)
}
server.rpcServer.Addr = ":" + splitAddr[1]
server.rpcServer.MinMsgLen = 2
server.compressBytesLen = compressBytesLen
if maxRpcParamLen > 0 {
server.rpcServer.MaxMsgLen = maxRpcParamLen
if server.maxRpcParamLen > 0 {
server.rpcServer.MaxMsgLen = server.maxRpcParamLen
} else {
server.rpcServer.MaxMsgLen = math.MaxUint32
}
@@ -90,12 +104,16 @@ func (server *Server) Start(listenAddr string, maxRpcParamLen uint32,compressByt
server.rpcServer.ReadDeadline = Default_ReadWriteDeadline
server.rpcServer.LenMsgLen = DefaultRpcLenMsgLen
server.rpcServer.Start()
return server.rpcServer.Start()
}
func (server *Server) Stop() {
server.rpcServer.Close()
}
func (agent *RpcAgent) OnDestroy() {}
func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError) {
func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, connTag string, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError) {
var mReply []byte
var errM error
@@ -112,150 +130,59 @@ func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, serviceMethod stri
defer processor.ReleaseRpcResponse(rpcResponse.RpcResponseData)
if errM != nil {
log.Error("mashal RpcResponseData failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",errM))
log.Error("marshal RpcResponseData failed", log.String("serviceMethod", serviceMethod), log.ErrorField("error", errM))
return
}
var compressBuff[]byte
var compressBuff []byte
bCompress := uint8(0)
if agent.rpcServer.compressBytesLen >0 && len(bytes) >= agent.rpcServer.compressBytesLen {
if agent.rpcServer.compressBytesLen > 0 && len(bytes) >= agent.rpcServer.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
compressBuff, cErr = compressor.CompressBlock(bytes)
if cErr != nil {
log.Error("CompressBlock failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",cErr))
log.Error("CompressBlock failed", log.String("serviceMethod", serviceMethod), log.ErrorField("error", cErr))
return
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
bCompress = 1 << 7
}
}
errM = agent.conn.WriteMsg([]byte{uint8(processor.GetProcessorType())|bCompress}, bytes)
if cap(compressBuff) >0 {
errM = agent.conn.WriteMsg([]byte{uint8(processor.GetProcessorType()) | bCompress}, bytes)
if cap(compressBuff) > 0 {
compressor.CompressBufferCollection(compressBuff)
}
if errM != nil {
log.Error("WriteMsg error,Rpc return is fail",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",errM))
log.Error("WriteMsg error,Rpc return is fail", log.String("serviceMethod", serviceMethod), log.ErrorField("error", errM))
}
}
func (agent *RpcAgent) Run() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
}
}()
for {
data, err := agent.conn.ReadMsg()
if err != nil {
log.Error("read message is error",log.String("remoteAddress",agent.conn.RemoteAddr().String()),log.ErrorAttr("error",err))
//will close tcpconn
//will close conn
log.Error("read message is error", log.String("remoteAddress", agent.conn.RemoteAddr().String()), log.ErrorField("error", err))
break
}
bCompress := (data[0]>>7) > 0
processor := GetProcessor(data[0]&0x7f)
if processor == nil {
err = agent.rpcServer.processRpcRequest(data, "", agent.WriteResponse)
if err != nil {
//will close conn
agent.conn.ReleaseReadMsg(data)
log.Warning("cannot find processor",log.String("RemoteAddr",agent.conn.RemoteAddr().String()))
return
}
log.Error("processRpcRequest is error", log.String("remoteAddress", agent.conn.RemoteAddr().String()), log.ErrorField("error", err))
//parse the head
var compressBuff []byte
byteData := data[1:]
if bCompress == true {
var unCompressErr error
compressBuff,unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr!= nil {
agent.conn.ReleaseReadMsg(data)
log.Error("UncompressBlock failed",log.String("RemoteAddr",agent.conn.RemoteAddr().String()),log.ErrorAttr("error",unCompressErr))
return
}
byteData = compressBuff
}
req := MakeRpcRequest(processor, 0, 0, "", false, nil)
err = processor.Unmarshal(byteData, req.RpcRequestData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
break
}
agent.conn.ReleaseReadMsg(data)
if err != nil {
log.Error("Unmarshal failed",log.String("RemoteAddr",agent.conn.RemoteAddr().String()),log.ErrorAttr("error",err))
if req.RpcRequestData.GetSeq() > 0 {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() == false {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
continue
} else {
ReleaseRpcRequest(req)
break
}
}
//hand off to the handler for processing
serviceMethod := strings.Split(req.RpcRequestData.GetServiceMethod(), ".")
if len(serviceMethod) < 1 {
rpcError := RpcError("rpc request req.ServiceMethod is error")
if req.RpcRequestData.IsNoReply() == false {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
log.Error("rpc request req.ServiceMethod is error")
continue
}
rpcHandler := agent.rpcServer.rpcHandleFinder.FindRpcHandler(serviceMethod[0])
if rpcHandler == nil {
rpcError := RpcError(fmt.Sprintf("service method %s not config!", req.RpcRequestData.GetServiceMethod()))
if req.RpcRequestData.IsNoReply() == false {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
log.Error("serviceMethod not config",log.String("serviceMethod",req.RpcRequestData.GetServiceMethod()))
ReleaseRpcRequest(req)
continue
}
if req.RpcRequestData.IsNoReply() == false {
req.requestHandle = func(Returns interface{}, Err RpcError) {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), Returns, Err)
ReleaseRpcRequest(req)
}
}
req.inParam, err = rpcHandler.UnmarshalInParam(req.rpcProcessor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetRpcMethodId(), req.RpcRequestData.GetInParam())
if err != nil {
rErr := "Call Rpc " + req.RpcRequestData.GetServiceMethod() + " Param error " + err.Error()
log.Error("call rpc param error",log.String("serviceMethod",req.RpcRequestData.GetServiceMethod()),log.ErrorAttr("error",err))
if req.requestHandle != nil {
req.requestHandle(nil, RpcError(rErr))
} else {
ReleaseRpcRequest(req)
}
continue
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
}
}
}
@@ -281,180 +208,8 @@ func (agent *RpcAgent) Destroy() {
agent.conn.Destroy()
}
func (server *Server) NewAgent(c *network.TCPConn) network.Agent {
func (server *Server) NewAgent(c network.Conn) network.Agent {
agent := &RpcAgent{conn: c, rpcServer: server}
return agent
}
func (server *Server) myselfRpcHandlerGo(client *Client,handlerName string, serviceMethod string, args interface{},callBack reflect.Value, reply interface{}) error {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error("service method not config",log.String("serviceMethod",serviceMethod))
return err
}
return rpcHandler.CallMethod(client,serviceMethod, args,callBack, reply)
}
func (server *Server) selfNodeRpcHandlerGo(timeout time.Duration,processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
pCall := MakeCall()
pCall.Seq = client.generateSeq()
pCall.TimeOut = timeout
pCall.ServiceMethod = serviceMethod
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error("service method not config",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
pCall.Seq = 0
pCall.DoError(err)
return pCall
}
var iParam interface{}
if processor == nil {
_, processor = GetProcessorType(args)
}
if args != nil {
var err error
iParam,err = processor.Clone(args)
if err != nil {
sErr := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.Error("deep copy inParam is failed",log.String("handlerName",handlerName),log.String("serviceMethod",serviceMethod))
pCall.Seq = 0
pCall.DoError(sErr)
return pCall
}
}
req := MakeRpcRequest(processor, 0, rpcMethodId, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
if rawArgs != nil {
var err error
req.inParam, err = rpcHandler.UnmarshalInParam(processor, serviceMethod, rpcMethodId, rawArgs)
if err != nil {
log.Error("unmarshalInParam is failed",log.String("serviceMethod",serviceMethod),log.Uint32("rpcMethodId",rpcMethodId),log.ErrorAttr("error",err))
pCall.Seq = 0
pCall.DoError(err)
ReleaseRpcRequest(req)
return pCall
}
}
if noReply == false {
client.AddPending(pCall)
callSeq := pCall.Seq
req.requestHandle = func(Returns interface{}, Err RpcError) {
if reply != nil && Returns != reply && Returns != nil {
byteReturns, err := req.rpcProcessor.Marshal(Returns)
if err != nil {
Err = ConvertError(err)
log.Error("returns data cannot be marshal",log.Uint64("seq",callSeq),log.ErrorAttr("error",err))
}else{
err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
Err = ConvertError(err)
log.Error("returns data cannot be Unmarshal",log.Uint64("seq",callSeq),log.ErrorAttr("error",err))
}
}
}
ReleaseRpcRequest(req)
v := client.RemovePending(callSeq)
if v == nil {
log.Error("rpcClient cannot find seq",log.Uint64("seq",callSeq))
return
}
if len(Err) == 0 {
v.Err = nil
v.DoOK()
} else {
log.Error(Err.Error())
v.DoError(Err)
}
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
log.Error(err.Error())
pCall.DoError(err)
ReleaseRpcRequest(req)
}
return pCall
}
func (server *Server) selfNodeRpcHandlerAsyncGo(timeout time.Duration,client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value,cancelable bool) (CancelRpc,error) {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error(err.Error())
return emptyCancelRpc,err
}
_, processor := GetProcessorType(args)
iParam,err := processor.Clone(args)
if err != nil {
errM := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.Error(errM.Error())
return emptyCancelRpc,errM
}
req := MakeRpcRequest(processor, 0, 0, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
cancelRpc := emptyCancelRpc
var callSeq uint64
if noReply == false {
callSeq = client.generateSeq()
pCall := MakeCall()
pCall.Seq = callSeq
pCall.rpcHandler = callerRpcHandler
pCall.callback = &callback
pCall.Reply = reply
pCall.ServiceMethod = serviceMethod
pCall.TimeOut = timeout
client.AddPending(pCall)
rpcCancel := RpcCancel{CallSeq: callSeq,Cli: client}
cancelRpc = rpcCancel.CancelRpc
req.requestHandle = func(Returns interface{}, Err RpcError) {
v := client.RemovePending(callSeq)
if v == nil {
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
v.Err = nil
} else {
v.Err = Err
}
if Returns != nil {
v.Reply = Returns
}
v.rpcHandler.PushRpcResponse(v)
ReleaseRpcRequest(req)
}
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
if callSeq > 0 {
client.RemovePending(callSeq)
}
return emptyCancelRpc,err
}
return cancelRpc,nil
}


@@ -6,11 +6,12 @@ import (
"sync/atomic"
"time"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/log"
rpcHandle "github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/util/timer"
"github.com/duanhf2012/origin/concurrent"
"github.com/duanhf2012/origin/v2/concurrent"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
rpcHandle "github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/timer"
"slices"
)
const InitModuleId = 1e9
@@ -35,9 +36,9 @@ type IModule interface {
}
type IModuleTimer interface {
AfterFunc(d time.Duration, cb func(*timer.Timer)) *timer.Timer
CronFunc(cronExpr *timer.CronExpr, cb func(*timer.Cron)) *timer.Cron
NewTicker(d time.Duration, cb func(*timer.Ticker)) *timer.Ticker
SafeAfterFunc(d time.Duration, cb func(*timer.Timer)) *timer.Timer
SafeCronFunc(cronExpr *timer.CronExpr, cb func(*timer.Cron)) *timer.Cron
SafeNewTicker(d time.Duration, cb func(*timer.Ticker)) *timer.Ticker
}
type Module struct {
@@ -46,7 +47,7 @@ type Module struct {
moduleName string //module name
parent IModule //parent
self IModule //self
child map[uint32]IModule //children
child []IModule //children
mapActiveTimer map[timer.ITimer]struct{}
mapActiveIdTimer map[uint64]timer.ITimer
dispatcher *timer.Dispatcher //timer
@@ -93,10 +94,7 @@ func (m *Module) AddModule(module IModule) (uint32, error) {
pAddModule.moduleId = m.NewModuleId()
}
if m.child == nil {
m.child = map[uint32]IModule{}
}
_, ok := m.child[module.GetModuleId()]
_,ok := m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()]
if ok == true {
return 0, fmt.Errorf("exists module id %d", module.GetModuleId())
}
@@ -109,29 +107,33 @@ func (m *Module) AddModule(module IModule) (uint32, error) {
pAddModule.eventHandler = event.NewEventHandler()
pAddModule.eventHandler.Init(m.eventHandler.GetEventProcessor())
pAddModule.IConcurrent = m.IConcurrent
m.child = append(m.child,module)
m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()] = module
err := module.OnInit()
if err != nil {
delete(m.ancestor.getBaseModule().(*Module).descendants, module.GetModuleId())
m.child = m.child[:len(m.child)-1]
log.Error("module OnInit error",log.String("ModuleName",module.GetModuleName()),log.ErrorField("err",err))
return 0, err
}
m.child[module.GetModuleId()] = module
m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()] = module
log.Debug("Add module "+module.GetModuleName()+ " completed")
log.Debug("Add module " + module.GetModuleName() + " completed")
return module.GetModuleId(), nil
}
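With children now kept in a slice on the parent and a descendants map on the ancestor, AddModule rolls both back if the child's OnInit fails, and ReleaseModule tears descendants down deepest-first before cancelling the module's timers. A hypothetical parent/child pairing, assuming the usual embedding of service.Module and service.Service (names are examples only):

package myservice // hypothetical

import "github.com/duanhf2012/origin/v2/service"

// InventoryModule is an illustrative child module.
type InventoryModule struct {
	service.Module
}

func (m *InventoryModule) OnInit() error { return nil }
func (m *InventoryModule) OnRelease()    {}

type GameService struct {
	service.Service
	inventoryId uint32
}

func (s *GameService) OnInit() error {
	// AddModule appends the child to the parent's child slice and registers it
	// in the ancestor's descendants map; a failing OnInit undoes both.
	id, err := s.AddModule(&InventoryModule{})
	if err != nil {
		return err
	}
	s.inventoryId = id
	// The child can later be dropped early with s.ReleaseModule(s.inventoryId);
	// otherwise children are released automatically in reverse order.
	return nil
}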
func (m *Module) ReleaseModule(moduleId uint32) {
pModule := m.GetModule(moduleId).getBaseModule().(*Module)
pModule.self.OnRelease()
log.Debug("Release module " + pModule.GetModuleName())
//release descendants
for id := range pModule.child {
m.ReleaseModule(id)
for i:=len(pModule.child)-1; i>=0; i-- {
m.ReleaseModule(pModule.child[i].GetModuleId())
}
pModule.self.OnRelease()
pModule.GetEventHandler().Destroy()
log.Debug("Release module "+ pModule.GetModuleName())
for pTimer := range pModule.mapActiveTimer {
pTimer.Cancel()
}
@@ -140,7 +142,10 @@ func (m *Module) ReleaseModule(moduleId uint32) {
t.Cancel()
}
delete(m.child, moduleId)
m.child = slices.DeleteFunc(m.child, func(module IModule) bool {
return module.GetModuleId() == moduleId
})
delete(m.ancestor.getBaseModule().(*Module).descendants, moduleId)
//clean up the removed Module
@@ -208,6 +213,7 @@ func (m *Module) OnAddTimer(t timer.ITimer) {
}
}
// Deprecated: this function simply calls SafeAfterFunc
func (m *Module) AfterFunc(d time.Duration, cb func(*timer.Timer)) *timer.Timer {
if m.mapActiveTimer == nil {
m.mapActiveTimer = map[timer.ITimer]struct{}{}
@@ -216,6 +222,7 @@ func (m *Module) AfterFunc(d time.Duration, cb func(*timer.Timer)) *timer.Timer
return m.dispatcher.AfterFunc(d, nil, cb, m.OnCloseTimer, m.OnAddTimer)
}
// Deprecated: this function simply calls SafeCronFunc
func (m *Module) CronFunc(cronExpr *timer.CronExpr, cb func(*timer.Cron)) *timer.Cron {
if m.mapActiveTimer == nil {
m.mapActiveTimer = map[timer.ITimer]struct{}{}
@@ -224,6 +231,7 @@ func (m *Module) CronFunc(cronExpr *timer.CronExpr, cb func(*timer.Cron)) *timer
return m.dispatcher.CronFunc(cronExpr, nil, cb, m.OnCloseTimer, m.OnAddTimer)
}
// Deprecated: this function simply calls SafeNewTicker
func (m *Module) NewTicker(d time.Duration, cb func(*timer.Ticker)) *timer.Ticker {
if m.mapActiveTimer == nil {
m.mapActiveTimer = map[timer.ITimer]struct{}{}
@@ -277,8 +285,8 @@ func (m *Module) SafeNewTicker(tickerId *uint64, d time.Duration, AdditionData i
}
func (m *Module) CancelTimerId(timerId *uint64) bool {
if timerId==nil || *timerId == 0 {
log.Warning("timerId is invalid")
if timerId == nil || *timerId == 0 {
log.Warn("timerId is invalid")
return false
}
@@ -289,7 +297,7 @@ func (m *Module) CancelTimerId(timerId *uint64) bool {
t, ok := m.mapActiveIdTimer[*timerId]
if ok == false {
log.Stack("cannot find timer id ", log.Uint64("timerId",*timerId))
log.StackError("cannot find timer id ", log.Uint64("timerId", *timerId))
return false
}


@@ -1,19 +1,19 @@
package service
import (
"encoding/json"
"errors"
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/profiler"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/util/timer"
"github.com/duanhf2012/origin/v2/concurrent"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/profiler"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/timer"
"reflect"
"runtime"
"strconv"
"sync"
"sync/atomic"
"github.com/duanhf2012/origin/concurrent"
)
var timerDispatcherLen = 100000
@@ -21,195 +21,220 @@ var maxServiceEventChannelNum = 2000000
type IService interface {
concurrent.IConcurrent
Init(iService IService,getClientFun rpc.FuncRpcClient,getServerFun rpc.FuncRpcServer,serviceCfg interface{})
Init(iService IService, getClientFun rpc.FuncRpcClient, getServerFun rpc.FuncRpcServer, serviceCfg interface{})
Stop()
Start()
OnSetup(iService IService)
OnInit() error
OnStart()
OnRetire()
OnRelease()
SetName(serviceName string)
GetName() string
GetRpcHandler() rpc.IRpcHandler
GetServiceCfg()interface{}
GetServiceCfg() interface{}
GetProfiler() *profiler.Profiler
GetServiceEventChannelNum() int
GetServiceTimerChannelNum() int
SetEventChannelNum(num int)
OpenProfiler()
SetRetire() //mark the service as retired
IsRetire() bool //whether the service has retired
}
type Service struct {
Module
rpcHandler rpc.RpcHandler //rpc
name string //service name
wg sync.WaitGroup
serviceCfg interface{}
goroutineNum int32
startStatus bool
eventProcessor event.IEventProcessor
profiler *profiler.Profiler //performance profiler
nodeEventLister rpc.INodeListener
rpcHandler rpc.RpcHandler //rpc
name string //service name
wg sync.WaitGroup
serviceCfg interface{}
goroutineNum int32
startStatus bool
isRelease int32
retire int32
eventProcessor event.IEventProcessor
profiler *profiler.Profiler //performance profiler
nodeConnLister rpc.INodeConnListener
natsConnListener rpc.INatsConnListener
discoveryServiceLister rpc.IDiscoveryServiceListener
chanEvent chan event.IEvent
closeSig chan struct{}
}
// RpcConnEvent node connection event
type RpcConnEvent struct{
IsConnect bool
NodeId int
chanEvent chan event.IEvent
closeSig chan struct{}
}
// DiscoveryServiceEvent service discovery event for a node
type DiscoveryServiceEvent struct{
type DiscoveryServiceEvent struct {
IsDiscovery bool
ServiceName []string
NodeId int
NodeId string
}
func SetMaxServiceChannel(maxEventChannel int){
type EtcdServiceRecordEvent struct {
NetworkName string
TTLSecond int64
RecordKey string
RecordInfo string
}
type Empty struct {
}
func SetMaxServiceChannel(maxEventChannel int) {
maxServiceEventChannelNum = maxEventChannel
}
func (rpcEventData *DiscoveryServiceEvent) GetEventType() event.EventType{
func (rpcEventData *DiscoveryServiceEvent) GetEventType() event.EventType {
return event.Sys_Event_DiscoverService
}
func (rpcEventData *RpcConnEvent) GetEventType() event.EventType{
return event.Sys_Event_Node_Event
}
func (s *Service) OnSetup(iService IService){
func (s *Service) OnSetup(iService IService) {
if iService.GetName() == "" {
s.name = reflect.Indirect(reflect.ValueOf(iService)).Type().Name()
}
}
func (s *Service) OpenProfiler() {
func (s *Service) OpenProfiler() {
s.profiler = profiler.RegProfiler(s.GetName())
if s.profiler==nil {
log.Fatal("rofiler.RegProfiler "+s.GetName()+" fail.")
if s.profiler == nil {
log.Fatal("profiler.RegProfiler " + s.GetName() + " fail.")
}
}
func (s *Service) Init(iService IService,getClientFun rpc.FuncRpcClient,getServerFun rpc.FuncRpcServer,serviceCfg interface{}) {
func (s *Service) IsRetire() bool {
return atomic.LoadInt32(&s.retire) != 0
}
func (s *Service) SetRetire() {
atomic.StoreInt32(&s.retire, 1)
ev := event.NewEvent()
ev.Type = event.Sys_Event_Retire
s.pushEvent(ev)
}
func (s *Service) Init(iService IService, getClientFun rpc.FuncRpcClient, getServerFun rpc.FuncRpcServer, serviceCfg interface{}) {
s.closeSig = make(chan struct{})
s.dispatcher =timer.NewDispatcher(timerDispatcherLen)
s.dispatcher = timer.NewDispatcher(timerDispatcherLen)
if s.chanEvent == nil {
s.chanEvent = make(chan event.IEvent,maxServiceEventChannelNum)
s.chanEvent = make(chan event.IEvent, maxServiceEventChannelNum)
}
s.rpcHandler.InitRpcHandler(iService.(rpc.IRpcHandler),getClientFun,getServerFun,iService.(rpc.IRpcHandlerChannel))
s.rpcHandler.InitRpcHandler(iService.(rpc.IRpcHandler), getClientFun, getServerFun, iService.(rpc.IRpcHandlerChannel))
s.IRpcHandler = &s.rpcHandler
s.self = iService.(IModule)
//initialize the ancestor
s.ancestor = iService.(IModule)
s.seedModuleId =InitModuleId
s.seedModuleId = InitModuleId
s.descendants = map[uint32]IModule{}
s.serviceCfg = serviceCfg
s.goroutineNum = 1
s.eventProcessor = event.NewEventProcessor()
s.eventProcessor.Init(s)
s.eventHandler = event.NewEventHandler()
s.eventHandler = event.NewEventHandler()
s.eventHandler.Init(s.eventProcessor)
s.Module.IConcurrent = &concurrent.Concurrent{}
}
func (s *Service) Start() {
s.startStatus = true
atomic.StoreInt32(&s.isRelease, 0)
var waitRun sync.WaitGroup
log.Info(s.GetName() + " service is running")
s.self.(IService).OnStart()
for i:=int32(0);i< s.goroutineNum;i++{
for i := int32(0); i < s.goroutineNum; i++ {
s.wg.Add(1)
waitRun.Add(1)
go func(){
log.Info(s.GetName()+" service is running",)
go func() {
waitRun.Done()
s.Run()
s.run()
}()
}
waitRun.Wait()
}
func (s *Service) Run() {
func (s *Service) run() {
defer s.wg.Done()
var bStop = false
cr := s.IConcurrent.(*concurrent.Concurrent)
concurrentCBChannel := cr.GetCallBackChannel()
for {
var analyzer *profiler.Analyzer
select {
case <-s.closeSig:
bStop = true
s.Release()
cr.Close()
case cb := <-concurrentCBChannel:
cr.DoCallback(cb)
case ev := <-s.chanEvent:
switch ev.GetEventType() {
case event.Sys_Event_Retire:
log.Info("service OnRetire", log.String("serviceName", s.GetName()))
s.self.(IService).OnRetire()
case event.ServiceRpcRequestEvent:
cEvent, ok := ev.(*event.Event)
if ok == false {
log.Error("Type event conversion error")
break
}
rpcRequest, ok := cEvent.Data.(*rpc.RpcRequest)
if ok == false {
log.Error("Type *rpc.RpcRequest conversion error")
break
}
if s.profiler != nil {
analyzer = s.profiler.Push("[RpcReq]" + rpcRequest.RpcRequestData.GetServiceMethod()+"."+strconv.Itoa(int(rpcRequest.RpcRequestData.GetRpcMethodId())))
}
s.GetRpcHandler().HandlerRpcRequest(rpcRequest)
if analyzer != nil {
analyzer.Pop()
analyzer = nil
}
event.DeleteEvent(cEvent)
case event.ServiceRpcResponseEvent:
cEvent, ok := ev.(*event.Event)
if ok == false {
log.Error("Type event conversion error")
break
}
rpcResponseCB, ok := cEvent.Data.(*rpc.Call)
if ok == false {
log.Error("Type *rpc.Call conversion error")
break
}
if s.profiler != nil {
analyzer = s.profiler.Push("[Res]" + rpcResponseCB.ServiceMethod)
}
s.GetRpcHandler().HandlerRpcResponseCB(rpcResponseCB)
if analyzer != nil {
analyzer.Pop()
analyzer = nil
}
event.DeleteEvent(cEvent)
default:
if s.profiler != nil {
analyzer = s.profiler.Push("[SEvent]" + strconv.Itoa(int(ev.GetEventType())))
}
s.eventProcessor.EventHandler(ev)
if analyzer != nil {
analyzer.Pop()
analyzer = nil
}
}
case t := <-s.dispatcher.ChanTimer:
if s.profiler != nil {
analyzer = s.profiler.Push("[timer]" + t.GetName())
}
t.Do()
if analyzer != nil {
@@ -219,16 +244,12 @@ func (s *Service) Run() {
}
if bStop == true {
break
}
}
}
func (s *Service) GetName() string {
return s.name
}
@@ -236,101 +257,137 @@ func (s *Service) SetName(serviceName string) {
s.name = serviceName
}
func (s *Service) Release() {
defer func() {
if r := recover(); r != nil {
log.StackError(fmt.Sprint(r))
}
}()
if atomic.AddInt32(&s.isRelease, -1) == -1 {
s.self.OnRelease()
for i:=len(s.child)-1; i>=0; i-- {
s.ReleaseModule(s.child[i].GetModuleId())
}
}
}
func (s *Service) OnRelease() {
}
func (s *Service) OnInit() error {
return nil
}
func (s *Service) Stop() {
log.Info("stop " + s.GetName() + " service ")
close(s.closeSig)
s.wg.Wait()
log.Info(s.GetName() + " service has been stopped")
}
func (s *Service) GetServiceCfg() interface{} {
return s.serviceCfg
}
func (s *Service) ParseServiceCfg(cfg interface{}) error {
if s.serviceCfg == nil {
return errors.New("no service configuration found")
}
rv := reflect.ValueOf(s.serviceCfg)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
return errors.New("no service configuration found")
}
bytes, err := json.Marshal(s.serviceCfg)
if err != nil {
return err
}
return json.Unmarshal(bytes, cfg)
}
func (s *Service) GetProfiler() *profiler.Profiler {
return s.profiler
}
func (s *Service) RegEventReceiverFunc(eventType event.EventType, receiver event.IEventHandler, callback event.EventCallBack) {
s.eventProcessor.RegEventReceiverFunc(eventType, receiver, callback)
}
func (s *Service) UnRegEventReceiverFunc(eventType event.EventType, receiver event.IEventHandler) {
s.eventProcessor.UnRegEventReceiverFun(eventType, receiver)
}
func (s *Service) RegRawRpc(rpcMethodId uint32, rawRpcCB rpc.RawRpcCallBack) {
s.rpcHandler.RegRawRpc(rpcMethodId, rawRpcCB)
}
func (s *Service) OnStart() {
}
func (s *Service) OnNodeConnEvent(ev event.IEvent) {
re := ev.(*rpc.RpcConnEvent)
if re.IsConnect {
s.nodeConnLister.OnNodeConnected(re.NodeId)
} else {
s.nodeConnLister.OnNodeDisconnect(re.NodeId)
}
}
func (s *Service) OnNatsConnEvent(ev event.IEvent) {
event := ev.(*rpc.NatsConnEvent)
if event.IsConnect {
s.natsConnListener.OnNatsConnected()
} else {
s.natsConnListener.OnNatsDisconnect()
}
}
func (s *Service) OnDiscoverServiceEvent(ev event.IEvent) {
de := ev.(*DiscoveryServiceEvent)
if de.IsDiscovery {
s.discoveryServiceLister.OnDiscoveryService(de.NodeId, de.ServiceName)
} else {
s.discoveryServiceLister.OnUnDiscoveryService(de.NodeId, de.ServiceName)
}
}
func (s *Service) RegNodeConnListener(nodeConnListener rpc.INodeConnListener) {
s.nodeConnLister = nodeConnListener
s.RegEventReceiverFunc(event.Sys_Event_Node_Conn_Event, s.GetEventHandler(), s.OnNodeConnEvent)
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegNodeConnListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_Node_Conn_Event, s.GetEventHandler())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) RegNatsConnListener(natsConnListener rpc.INatsConnListener) {
s.natsConnListener = natsConnListener
s.RegEventReceiverFunc(event.Sys_Event_Nats_Conn_Event, s.GetEventHandler(), s.OnNatsConnEvent)
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegNatsConnListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_Nats_Conn_Event, s.GetEventHandler())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) RegDiscoverListener(discoveryServiceListener rpc.IDiscoveryServiceListener) {
s.discoveryServiceLister = discoveryServiceListener
s.RegEventReceiverFunc(event.Sys_Event_DiscoverService, s.GetEventHandler(), s.OnDiscoverServiceEvent)
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegDiscoverListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_DiscoverService, s.GetEventHandler())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) PushRpcRequest(rpcRequest *rpc.RpcRequest) error {
ev := event.NewEvent()
ev.Type = event.ServiceRpcRequestEvent
ev.Data = rpcRequest
@@ -338,7 +395,7 @@ func (s *Service) PushRpcRequest(rpcRequest *rpc.RpcRequest) error{
return s.pushEvent(ev)
}
func (s *Service) PushRpcResponse(call *rpc.Call) error {
ev := event.NewEvent()
ev.Type = event.ServiceRpcResponseEvent
ev.Data = call
@@ -346,13 +403,13 @@ func (s *Service) PushRpcResponse(call *rpc.Call) error{
return s.pushEvent(ev)
}
func (s *Service) PushEvent(ev event.IEvent) error {
return s.pushEvent(ev)
}
func (s *Service) pushEvent(ev event.IEvent) error {
if len(s.chanEvent) >= maxServiceEventChannelNum {
err := errors.New("the event channel in the service is full")
log.Error(err.Error())
return err
}
@@ -361,25 +418,26 @@ func (s *Service) pushEvent(ev event.IEvent) error{
return nil
}
func (s *Service) GetServiceEventChannelNum() int {
return len(s.chanEvent)
}
func (s *Service) GetServiceTimerChannelNum() int {
return len(s.dispatcher.ChanTimer)
}
func (s *Service) SetEventChannelNum(num int) {
if s.chanEvent == nil {
s.chanEvent = make(chan event.IEvent, num)
} else {
panic("this stage cannot be set")
}
}
// Deprecated: replace it with the OpenConcurrent function
func (s *Service) SetGoRoutineNum(goroutineNum int32) bool {
// The goroutine count cannot be changed once the service has started, and multi-coroutine mode is not allowed while the profiler is open
if s.startStatus == true || s.profiler != nil {
log.Error("open profiler mode is not allowed to set Multi-coroutine.")
return false
}
@@ -387,3 +445,6 @@ func (s *Service) SetGoRoutineNum(goroutineNum int32) bool {
s.goroutineNum = goroutineNum
return true
}
func (s *Service) OnRetire() {
}
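The retire flow added here runs entirely on the service event loop: SetRetire flips an atomic flag and pushes a Sys_Event_Retire event, run() dispatches it, and the service's OnRetire override is called on its own goroutine while IsRetire reports true. A minimal hedged sketch of a service opting into the hook (the GateService name and the log line are illustrative assumptions, not part of this change):

package gateservice

import (
    "github.com/duanhf2012/origin/v2/log"
    "github.com/duanhf2012/origin/v2/service"
)

// GateService is a hypothetical service that stops accepting new work
// once the node asks it to retire.
type GateService struct {
    service.Service
}

func (gs *GateService) OnInit() error {
    return nil
}

// OnRetire runs on the service goroutine after SetRetire pushes the
// Sys_Event_Retire event; handlers can also poll IsRetire().
func (gs *GateService) OnRetire() {
    log.Info(gs.GetName() + " is retiring, rejecting new sessions")
}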

View File

@@ -1,6 +1,9 @@
package service
import (
"github.com/duanhf2012/origin/v2/log"
"os"
)
// all local services on this node
var mapServiceName map[string]IService
@@ -11,9 +14,6 @@ type RegDiscoveryServiceEventFunType func(serviceName string)
var RegRpcEventFun RegRpcEventFunType
var UnRegRpcEventFun RegRpcEventFunType
var RegDiscoveryServiceEventFun RegDiscoveryServiceEventFunType
var UnRegDiscoveryServiceEventFun RegDiscoveryServiceEventFunType
func init(){
mapServiceName = map[string]IService{}
setupServiceList = []IService{}
@@ -23,8 +23,8 @@ func Init() {
for _,s := range setupServiceList {
err := s.OnInit()
if err != nil {
log.Error("Failed to initialize "+s.GetName()+" service",log.ErrorField("err",err))
os.Exit(1)
}
}
}
@@ -60,3 +60,9 @@ func StopAllService(){
setupServiceList[i].Stop()
}
}
func NotifyAllServiceRetire(){
for i := len(setupServiceList) - 1; i >= 0; i-- {
setupServiceList[i].SetRetire()
}
}
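NotifyAllServiceRetire walks the registered services in reverse and calls SetRetire on each one, which is a softer step than StopAllService: the event loops keep running, but every service gets its OnRetire callback. A hedged sketch of a shutdown path combining the two (the fixed grace period is an illustrative assumption):

package main

import (
    "time"

    "github.com/duanhf2012/origin/v2/service"
)

// gracefulShutdown is a hypothetical helper: ask every service to retire
// (drain in-flight work) and only stop them after a grace period.
func gracefulShutdown(grace time.Duration) {
    service.NotifyAllServiceRetire()
    time.Sleep(grace)
    service.StopAllService()
}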

View File

@@ -0,0 +1,203 @@
package frametimer
import (
"container/heap"
"context"
"errors"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"time"
)
type TimerCB func(context.Context, FrameTimerID)
type _timerHeap struct {
timers []*timerData
}
func (h *_timerHeap) Len() int {
return len(h.timers)
}
func (h *_timerHeap) Less(i, j int) bool {
return h.timers[i].frameNum < h.timers[j].frameNum
}
func (h *_timerHeap) Swap(i, j int) {
h.timers[i], h.timers[j] = h.timers[j], h.timers[i]
h.timers[i].idx = i
h.timers[j].idx = j
}
func (h *_timerHeap) Push(x interface{}) {
td := x.(*timerData)
h.timers = append(h.timers, td)
td.idx = len(h.timers) - 1
}
func (h *_timerHeap) Pop() (ret interface{}) {
l := len(h.timers)
h.timers, ret = h.timers[:l-1], h.timers[l-1]
return
}
type FrameGroup struct {
groupID FrameGroupID
timerHeap _timerHeap
ft *FrameTimer
preTickGlobalFrameNum FrameNumber // global frame number at the last tick
preGlobalFrameNum FrameNumber // last recorded global frame, used to update the FrameTimer.mapFrameGroup mapping
frameNum FrameNumber // current frame of this group
pause bool // paused state
multiple uint8 // speed multiplier, defaults to 1; only values 1-5 are allowed
}
func (fg *FrameGroup) init() {
fg.timerHeap.timers = make([]*timerData, 0, 512)
fg.groupID = fg.ft.genGroupID()
fg.multiple = 1
heap.Init(&fg.timerHeap)
}
func (fg *FrameGroup) convertGlobalFrameNum(frameNum FrameNumber) FrameNumber {
return fg.ft.getGlobalFrameNumber() + (frameNum-fg.frameNum)/FrameNumber(fg.multiple)
}
func (fg *FrameGroup) refreshMinFrame() {
if fg.timerHeap.Len() == 0 || fg.pause {
return
}
globalFrameNum := fg.convertGlobalFrameNum(fg.timerHeap.timers[0].frameNum)
fg.ft.refreshGroupMinFrame(fg.groupID, fg.preGlobalFrameNum, globalFrameNum)
fg.preGlobalFrameNum = globalFrameNum
}
func (fg *FrameGroup) tick(globalFrame FrameNumber) {
fg.frameNum = fg.frameNum + (globalFrame-fg.preTickGlobalFrameNum)*FrameNumber(fg.multiple)
fg.preTickGlobalFrameNum = globalFrame
fg.onceTick()
fg.refreshMinFrame()
}
func (fg *FrameGroup) onceTick() {
for fg.timerHeap.Len() > 0 {
if fg.timerHeap.timers[0].frameNum > fg.frameNum {
break
}
t := heap.Pop(&fg.timerHeap).(*timerData)
ev := event.NewEvent()
ev.Type = event.Sys_Event_FrameTick
ev.Data = t
fg.ft.NotifyEvent(ev)
fg.ft.removeTimerData(t.timerID)
if t.tickerFrameNum != 0 {
fg.addTicker(t.timerID, t.tickerFrameNum, t.ctx, t.cb)
}
}
}
func (fg *FrameGroup) addTimer(timerID FrameTimerID, frame FrameNumber, ctx context.Context, cb TimerCB) {
nextFrame := fg.frameNum + frame
td := fg.ft.addTimerData(timerID, nextFrame, 0, ctx, cb)
heap.Push(&fg.timerHeap, td)
}
func (fg *FrameGroup) addTicker(timerID FrameTimerID, frame FrameNumber, ctx context.Context, cb TimerCB) {
nextFrame := fg.frameNum + frame
td := fg.ft.addTimerData(timerID, nextFrame, frame, ctx, cb)
heap.Push(&fg.timerHeap, td)
}
// SetMultiple sets the speed multiplier; the allowed range is 1-5
func (fg *FrameGroup) SetMultiple(multiple uint8) error {
if fg.multiple == multiple {
return nil
}
if multiple < 1 || multiple > maxMultiple {
return errors.New("invalid multiplier")
}
fg.multiple = multiple
fg.refreshMinFrame()
return nil
}
// FrameAfterFunc creates a one-shot (After) timer
func (fg *FrameGroup) FrameAfterFunc(timerID *FrameTimerID, d time.Duration, ctx context.Context, cb TimerCB) {
fg.ft.locker.Lock()
defer fg.ft.locker.Unlock()
frame := FrameNumber(d / fg.ft.oneFrameTime)
newTimerID := fg.ft.genTimerID()
fg.addTimer(newTimerID, frame, ctx, cb)
*timerID = newTimerID
fg.refreshMinFrame()
}
// FrameNewTicker creates a repeating (Ticker) timer
func (fg *FrameGroup) FrameNewTicker(timerID *FrameTimerID, d time.Duration, ctx context.Context, cb TimerCB) {
fg.ft.locker.Lock()
defer fg.ft.locker.Unlock()
frame := FrameNumber(d / fg.ft.oneFrameTime)
newTimerID := fg.ft.genTimerID()
fg.addTicker(newTimerID, frame, ctx, cb)
*timerID = newTimerID
fg.refreshMinFrame()
}
// Pause pauses the timer group
func (fg *FrameGroup) Pause() {
fg.ft.locker.Lock()
defer fg.ft.locker.Unlock()
fg.pause = true
fg.ft.removeGroup(fg.groupID, fg.preGlobalFrameNum)
fg.preGlobalFrameNum = 0
}
// Resume resumes the timer group
func (fg *FrameGroup) Resume() {
fg.ft.locker.Lock()
defer fg.ft.locker.Unlock()
fg.pause = false
fg.refreshMinFrame()
fg.preTickGlobalFrameNum = fg.ft.globalFrameNum
}
// CancelTimer cancels the timer with the given ID
func (fg *FrameGroup) CancelTimer(timerID FrameTimerID) {
fg.ft.locker.Lock()
defer fg.ft.locker.Unlock()
td := fg.ft.getTimerData(timerID)
if td == nil {
log.Error("cannot find timer", log.Uint64("timerID", uint64(timerID)))
return
}
heap.Remove(&fg.timerHeap, td.idx)
fg.ft.removeGroup(fg.groupID, fg.preGlobalFrameNum)
fg.preGlobalFrameNum = 0
fg.refreshMinFrame()
}
func (fg *FrameGroup) Close() {
fg.ft.removeGroup(fg.groupID, fg.preGlobalFrameNum)
delete(fg.ft.mapGroup, fg.groupID)
}

View File

@@ -0,0 +1,199 @@
package frametimer
import (
"context"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"sync"
"time"
)
const defaultFps = 50
const defaultSleepInterval = time.Millisecond * 3
const maxFps = 1000
const maxMultiple = 5
type FrameGroupID uint64
type FrameTimerID uint64
type FrameNumber uint64
type timerData struct {
frameNum FrameNumber
timerID FrameTimerID
idx int
cb TimerCB
tickerFrameNum FrameNumber
ctx context.Context
}
type FrameTimer struct {
service.Module
fps uint32
oneFrameTime time.Duration
ticker *time.Ticker
mapFrameGroup map[FrameNumber]map[FrameGroupID]struct{}
mapGroup map[FrameGroupID]*FrameGroup
globalFrameNum FrameNumber // current global frame
maxTimerID FrameTimerID
maxGroupID FrameGroupID
mapTimer map[FrameTimerID]*timerData
timerDataPool *sync.Pool
locker sync.Mutex
sleepInterval time.Duration
}
func (ft *FrameTimer) getTimerData(timerID FrameTimerID) *timerData {
return ft.mapTimer[timerID]
}
func (ft *FrameTimer) addTimerData(timerID FrameTimerID, frameNum FrameNumber, tickerFrameNum FrameNumber, ctx context.Context, cb TimerCB) *timerData {
td := ft.timerDataPool.Get().(*timerData)
td.timerID = timerID
td.frameNum = frameNum
td.cb = cb
td.idx = -1
td.tickerFrameNum = tickerFrameNum
td.ctx = ctx
ft.mapTimer[timerID] = td
return td
}
func (ft *FrameTimer) removeTimerData(timerID FrameTimerID) {
td := ft.mapTimer[timerID]
if td == nil {
return
}
delete(ft.mapTimer, timerID)
ft.timerDataPool.Put(td)
}
func (ft *FrameTimer) genGroupID() FrameGroupID {
ft.maxGroupID++
return ft.maxGroupID
}
func (ft *FrameTimer) genTimerID() FrameTimerID {
ft.maxTimerID++
return ft.maxTimerID
}
func (ft *FrameTimer) getGlobalFrameNumber() FrameNumber {
return ft.globalFrameNum
}
func (ft *FrameTimer) frameTick() {
preFrameNum := ft.globalFrameNum
ft.globalFrameNum++
ft.locker.Lock()
defer ft.locker.Unlock()
for i := preFrameNum; i <= ft.globalFrameNum; i++ {
mapGroup := ft.mapFrameGroup[i]
delete(ft.mapFrameGroup, i)
for groupID := range mapGroup {
group := ft.mapGroup[groupID]
if group == nil {
continue
}
group.tick(ft.globalFrameNum)
}
}
}
func (ft *FrameTimer) removeGroup(groupID FrameGroupID, frameNum FrameNumber) {
delete(ft.mapFrameGroup[frameNum], groupID)
}
func (ft *FrameTimer) refreshGroupMinFrame(groupID FrameGroupID, preFrameNum FrameNumber, newFrameNum FrameNumber) {
ft.removeGroup(groupID, preFrameNum)
mapGroup := ft.mapFrameGroup[newFrameNum]
if mapGroup == nil {
mapGroup = make(map[FrameGroupID]struct{}, 6)
ft.mapFrameGroup[newFrameNum] = mapGroup
}
mapGroup[groupID] = struct{}{}
}
func (ft *FrameTimer) OnInit() error {
ft.mapFrameGroup = make(map[FrameNumber]map[FrameGroupID]struct{}, 1024)
ft.mapGroup = make(map[FrameGroupID]*FrameGroup, 1024)
ft.mapTimer = make(map[FrameTimerID]*timerData, 2048)
ft.timerDataPool = &sync.Pool{
New: func() any {
return &timerData{}
},
}
if ft.fps == 0 {
ft.fps = defaultFps
}
ft.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_FrameTick, ft.GetEventHandler(), func(e event.IEvent) {
ev := e.(*event.Event)
td, ok := ev.Data.(*timerData)
if !ok {
log.Error("convert *timerData error")
return
}
td.cb(td.ctx, td.timerID)
event.DeleteEvent(e)
})
ft.oneFrameTime = time.Second / time.Duration(ft.fps)
ft.ticker = time.NewTicker(ft.oneFrameTime)
if ft.sleepInterval == 0 {
ft.sleepInterval = defaultSleepInterval
}
go func() {
preTime := time.Now()
var preFrame FrameNumber
for {
time.Sleep(ft.sleepInterval)
frameMax := FrameNumber(time.Now().Sub(preTime) / ft.oneFrameTime)
for i := preFrame + 1; i <= frameMax; i++ {
ft.frameTick()
}
preFrame = frameMax
}
}()
return nil
}
// SetFps sets the frame rate; the higher it is, the smaller the timing error. If multiplier-based acceleration is needed, increase the fps appropriately to reduce error. Defaults to 50 fps.
func (ft *FrameTimer) SetFps(fps uint32) {
if fps > maxFps {
fps = maxFps
}
ft.fps = fps
}
// SetAccuracyInterval sets the polling precision; the tick loop sleeps for this interval between checks. Because sleep itself has jitter, the interval is not derived from the fps for now. Defaults to 3ms.
func (ft *FrameTimer) SetAccuracyInterval(interval time.Duration) {
ft.sleepInterval = interval
}
// NewGroup creates a timer group
func (ft *FrameTimer) NewGroup() *FrameGroup {
var group FrameGroup
group.ft = ft
group.init()
ft.locker.Lock()
defer ft.locker.Unlock()
ft.mapGroup[group.groupID] = &group
return &group
}
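Taken together with the FrameGroup file above: the FrameTimer module advances a global frame counter on its own goroutine, and each FrameGroup schedules its timers relative to that counter, so pausing or changing the multiplier only affects that one group. A hedged usage sketch, assuming the module has already been added to a service so OnInit has run (the import path, callback bodies and durations are assumptions):

package example

import (
    "context"
    "time"

    "github.com/duanhf2012/origin/v2/log"
    "github.com/duanhf2012/origin/v2/util/frametimer" // assumed import path
)

// setupFrameTimers schedules a one-shot timer and a ticker on one group.
func setupFrameTimers(ft *frametimer.FrameTimer) {
    group := ft.NewGroup()

    var afterID frametimer.FrameTimerID
    group.FrameAfterFunc(&afterID, 2*time.Second, context.Background(),
        func(ctx context.Context, id frametimer.FrameTimerID) {
            log.Info("one-shot frame timer fired")
        })

    var tickID frametimer.FrameTimerID
    group.FrameNewTicker(&tickID, 500*time.Millisecond, context.Background(),
        func(ctx context.Context, id frametimer.FrameTimerID) {
            log.Info("frame ticker fired")
        })

    group.SetMultiple(2) // run this group at double speed (allowed range is 1-5)
    group.Pause()        // freeze only this group's timers
    group.Resume()       // continue from the frame where it was paused
}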

View File

@@ -10,7 +10,7 @@ import (
"net/url"
"time"
"github.com/duanhf2012/origin/v2/service"
)
type HttpClientModule struct {
@@ -39,11 +39,19 @@ func (slf *SyncHttpResponse) Get(timeoutMs int) HttpResponse {
return rsp
}
return HttpResponse{
Err: fmt.Errorf("getting the return result timeout [%d]ms", timeoutMs),
}
}
func (m *HttpClientModule) InitHttpClient(transport http.RoundTripper, timeout time.Duration, checkRedirect func(req *http.Request, via []*http.Request) error) {
m.client = &http.Client{
Transport: transport,
Timeout: timeout,
CheckRedirect: checkRedirect,
}
}
func (m *HttpClientModule) Init(proxyUrl string, maxPool int, dialTimeout time.Duration, dialKeepAlive time.Duration, idleConnTimeout time.Duration, timeout time.Duration) {
type ProxyFun func(_ *http.Request) (*url.URL, error)
var proxyFun ProxyFun
if proxyUrl != "" {
@@ -55,16 +63,16 @@ func (m *HttpClientModule) Init(maxpool int, proxyUrl string) {
m.client = &http.Client{
Transport: &http.Transport{
DialContext: (&net.Dialer{
Timeout: dialTimeout,
KeepAlive: dialKeepAlive,
}).DialContext,
MaxIdleConns: maxPool,
MaxIdleConnsPerHost: maxPool,
IdleConnTimeout: idleConnTimeout,
Proxy: proxyFun,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
Timeout: timeout,
}
}
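The previously hard-coded 5s/30s/60s values are now parameters, and InitHttpClient additionally allows injecting a fully custom transport. A short hedged sketch of the new Init call (the package name in the import path and the concrete durations are assumptions):

package example

import (
    "time"

    "github.com/duanhf2012/origin/v2/sysmodule/httpclientmodule" // assumed import path
)

func newHttpClient() *httpclientmodule.HttpClientModule {
    var m httpclientmodule.HttpClientModule
    // proxyUrl, maxPool, dialTimeout, dialKeepAlive, idleConnTimeout, overall timeout
    m.Init("", 100, 5*time.Second, 30*time.Second, 60*time.Second, 5*time.Second)
    return &m
}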

View File

@@ -0,0 +1,65 @@
package kafkamodule
import "github.com/IBM/sarama"
type KafkaAdmin struct {
sarama.ClusterAdmin
mapTopic map[string]sarama.TopicDetail
}
func (ka *KafkaAdmin) Setup(kafkaVersion string, addrs []string) error {
config := sarama.NewConfig()
var err error
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
if err != nil {
return err
}
ka.ClusterAdmin, err = sarama.NewClusterAdmin(addrs, config)
if err != nil {
return err
}
ka.mapTopic, err = ka.GetTopics()
if err != nil {
ka.ClusterAdmin.Close()
return err
}
return nil
}
func (ka *KafkaAdmin) RefreshTopic() error {
var err error
ka.mapTopic, err = ka.GetTopics()
return err
}
func (ka *KafkaAdmin) HasTopic(topic string) bool {
_, ok := ka.mapTopic[topic]
return ok
}
func (ka *KafkaAdmin) GetTopicDetail(topic string) *sarama.TopicDetail {
topicDetail, ok := ka.mapTopic[topic]
if ok == false {
return nil
}
return &topicDetail
}
func (ka *KafkaAdmin) GetTopics() (map[string]sarama.TopicDetail, error) {
return ka.ListTopics()
}
// CreateTopic creates a topic.
// numPartitions: number of partitions
// replicationFactor: number of replicas
// validateOnly: when true, only validate the parameters without actually performing the operation
func (ka *KafkaAdmin) CreateTopic(topic string, numPartitions int32, replicationFactor int16, validateOnly bool) error {
return ka.ClusterAdmin.CreateTopic(topic, &sarama.TopicDetail{NumPartitions: numPartitions, ReplicationFactor: replicationFactor}, validateOnly)
}
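KafkaAdmin caches the topic list once in Setup, so HasTopic answers from that cache and RefreshTopic re-reads it after topics change. A hedged sketch of the typical flow (broker addresses, topic name and the import path are placeholders):

package example

import (
    "github.com/duanhf2012/origin/v2/log"
    "github.com/duanhf2012/origin/v2/sysmodule/kafkamodule" // assumed import path
)

func ensureTopic() {
    var admin kafkamodule.KafkaAdmin
    if err := admin.Setup("3.3.1", []string{"127.0.0.1:9092"}); err != nil {
        log.Fatal("kafka admin setup failed", log.ErrorAttr("err", err))
    }
    if !admin.HasTopic("player_events") {
        // 8 partitions, replication factor 2, validateOnly=false actually creates it
        if err := admin.CreateTopic("player_events", 8, 2, false); err != nil {
            log.Error("create topic failed", log.ErrorAttr("err", err))
        }
        _ = admin.RefreshTopic() // re-read the cached topic list
    }
}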

View File

@@ -0,0 +1,289 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"sync"
"time"
)
type ConsumerGroup struct {
sarama.ConsumerGroup
waitGroup sync.WaitGroup
chanExit chan error
ready chan bool
cancel context.CancelFunc
groupId string
}
func NewConsumerConfig(kafkaVersion string, assignor string, offsetsInitial int64) (*sarama.Config, error) {
var err error
config := sarama.NewConfig()
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
config.Consumer.Offsets.Initial = offsetsInitial
config.Consumer.Offsets.AutoCommit.Enable = false
switch assignor {
case "sticky":
// sticky round-robin: after a rebalance the previous assignment is kept as far as possible and partitions are peeled off the tail
// topic: T0{P0,P1,P2,P3,P4,P5}, consumers: C1, C2
// ---------------before rebalance (same as round-robin):
// C1: T0{P0} T0{P2} T0{P4}
// C2: T0{P1} T0{P3} T0{P5}
// ----------------after rebalance (one consumer added):
// C1: T0{P0} T0{P2}
// C2: T0{P1} T0{P3}
// C3: T0{P4} T0{P5} -- until the partition counts per consumer differ by at most 1
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()}
case "roundrobin":
// round-robin -- partitions are handed out one by one
// topic: T0{P0,P1,P2}, T1{P0,P1,P2,P3}, two consumers C1, C2
// C1: T0{P0} T0{P2} T1{P1} T1{P3}
// C2: T0{P1} T1{P0} T1{P2}
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()}
case "range":
// range (the default) -- each topic's partitions are split into contiguous ranges
// topic: T0{P0..P5}, T1{P0..P3}, two consumers C1, C2
// T0: 6 partitions / 2 consumers = 3, so each consumer gets 3 partitions of this topic
// T1: 4 partitions / 2 consumers = 2, so each consumer gets 2 partitions of this topic
// C1: T0{P0, P1, P2} T1{P0, P1}
// C2: T0{P3, P4, P5} T1{P2, P3}
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()}
default:
return nil, fmt.Errorf("unrecognized consumer group partition assignor: %s", assignor)
}
if err != nil {
return nil, err
}
return config, nil
}
type IMsgReceiver interface {
Receiver(msgs []*sarama.ConsumerMessage) bool
}
func (c *ConsumerGroup) Setup(addr []string, topics []string, groupId string, config *sarama.Config, receiverInterval time.Duration, maxReceiverNum int, msgReceiver IMsgReceiver) error {
var err error
c.ConsumerGroup, err = sarama.NewConsumerGroup(addr, groupId, config)
if err != nil {
return err
}
c.groupId = groupId
c.chanExit = make(chan error, 1)
var handler ConsumerGroupHandler
handler.receiver = msgReceiver
handler.maxReceiverNum = maxReceiverNum
handler.receiverInterval = receiverInterval
handler.chanExit = c.chanExit
var ctx context.Context
ctx, c.cancel = context.WithCancel(context.Background())
c.waitGroup.Add(1)
go func() {
defer c.waitGroup.Done()
for {
if err = c.Consume(ctx, topics, &handler); err != nil {
// when Setup fails, the error is returned here
log.Error("Error from consumer", log.Any("err", err))
return
}
// check if context was cancelled, signaling that the consumer should stop
if ctx.Err() != nil {
log.Info("consumer stop", log.Any("info", ctx.Err()))
}
c.chanExit <- err
}
}()
err = <-c.chanExit
// the consumer group is ready at this point
return err
}
func (c *ConsumerGroup) Close() {
log.Info("close consumerGroup")
// 1. cancel the consume context
c.cancel()
// 2. close the connection
err := c.ConsumerGroup.Close()
if err != nil {
log.Error("close consumerGroup fail", log.Any("err", err.Error()))
}
// 3. wait for the consume goroutine to exit
c.waitGroup.Wait()
}
type ConsumerGroupHandler struct {
receiver IMsgReceiver
receiverInterval time.Duration
maxReceiverNum int
//mapTopicOffset map[string]map[int32]int //map[topic]map[partitionId]offsetInfo
mapTopicData map[string]*MsgData
mx sync.Mutex
chanExit chan error
isRebalance bool // whether this Setup call is part of a rebalance
//stopSig *int32
}
type MsgData struct {
sync.Mutex
msg []*sarama.ConsumerMessage
mapPartitionOffset map[int32]int64
}
func (ch *ConsumerGroupHandler) Flush(session sarama.ConsumerGroupSession, topic string) {
if topic != "" {
msgData := ch.GetMsgData(topic)
msgData.flush(session, ch.receiver, topic)
return
}
for tp, msgData := range ch.mapTopicData {
msgData.flush(session, ch.receiver, tp)
}
}
func (ch *ConsumerGroupHandler) GetMsgData(topic string) *MsgData {
ch.mx.Lock()
defer ch.mx.Unlock()
msgData := ch.mapTopicData[topic]
if msgData == nil {
msgData = &MsgData{}
msgData.msg = make([]*sarama.ConsumerMessage, 0, ch.maxReceiverNum)
ch.mapTopicData[topic] = msgData
}
return msgData
}
func (md *MsgData) flush(session sarama.ConsumerGroupSession, receiver IMsgReceiver, topic string) {
if len(md.msg) == 0 {
return
}
// deliver the batch to the receiver
for {
ok := receiver.Receiver(md.msg)
if ok == true {
break
}
}
for pId, offset := range md.mapPartitionOffset {
session.MarkOffset(topic, pId, offset+1, "")
log.Info(fmt.Sprintf("topic %s,pid %d,offset %d", topic, pId, offset+1))
}
session.Commit()
//log.Info("commit")
//time.Sleep(1000 * time.Second)
// reset the buffers
md.msg = md.msg[:0]
clear(md.mapPartitionOffset)
}
func (md *MsgData) appendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage, receiver IMsgReceiver, maxReceiverNum int) {
md.Lock()
defer md.Unlock()
// received offsets only ever increase
if md.mapPartitionOffset == nil {
md.mapPartitionOffset = make(map[int32]int64, 10)
}
md.mapPartitionOffset[msg.Partition] = msg.Offset
md.msg = append(md.msg, msg)
if len(md.msg) < maxReceiverNum {
return
}
md.flush(session, receiver, msg.Topic)
}
func (ch *ConsumerGroupHandler) AppendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
dataMsg := ch.GetMsgData(msg.Topic)
dataMsg.appendMsg(session, msg, ch.receiver, ch.maxReceiverNum)
}
func (ch *ConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error {
ch.mapTopicData = make(map[string]*MsgData, 128)
if ch.isRebalance == false {
ch.chanExit <- nil
}
ch.isRebalance = true
return nil
}
func (ch *ConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error {
ch.Flush(session, "")
return nil
}
func (ch *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
ticker := time.NewTicker(ch.receiverInterval)
for {
select {
case msg := <-claim.Messages():
if msg == nil {
log.SWarning("claim will exit", log.Any("topic", claim.Topic()), log.Any("Partition", claim.Partition()))
return nil
}
ch.AppendMsg(session, msg)
case <-ticker.C:
ch.Flush(session, claim.Topic())
case <-session.Context().Done():
return nil
}
}
}
/*
Alibaba Cloud parameter notes: https://sls.aliyun.com/doc/oscompatibledemo/sarama_go_kafka_consume.html
conf.Net.TLS.Enable = true
conf.Net.SASL.Enable = true
conf.Net.SASL.User = project
conf.Net.SASL.Password = fmt.Sprintf("%s#%s", accessId, accessKey)
conf.Net.SASL.Mechanism = "PLAIN"
conf.Consumer.Fetch.Min = 1
conf.Consumer.Fetch.Default = 1024 * 1024
conf.Consumer.Retry.Backoff = 2 * time.Second
conf.Consumer.MaxWaitTime = 250 * time.Millisecond
conf.Consumer.MaxProcessingTime = 100 * time.Millisecond
conf.Consumer.Return.Errors = false
conf.Consumer.Offsets.AutoCommit.Enable = true
conf.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
conf.Consumer.Offsets.Initial = sarama.OffsetOldest
conf.Consumer.Offsets.Retry.Max = 3
*/
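The central contract here is IMsgReceiver: flush() calls Receiver with the buffered batch in a loop until it returns true, and only then marks and commits the offsets, so a receiver that fails to persist a batch can return false to have the same batch retried before any offset moves. A hedged sketch of a receiver written around that contract (persistBatch is a hypothetical helper):

package example

import (
    "github.com/IBM/sarama"
    "github.com/duanhf2012/origin/v2/log"
)

// dbReceiver is a hypothetical IMsgReceiver that stores each batch before
// the consumer group is allowed to commit offsets.
type dbReceiver struct{}

func (r *dbReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
    if err := persistBatch(msgs); err != nil {
        // returning false makes flush() retry the same batch; offsets are only
        // marked and committed after a successful delivery
        log.Error("persist batch failed, will retry", log.ErrorAttr("err", err))
        return false
    }
    return true
}

// persistBatch is a placeholder for the real storage logic.
func persistBatch(msgs []*sarama.ConsumerMessage) error { return nil }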

View File

@@ -0,0 +1,146 @@
package kafkamodule
import (
"context"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"time"
)
type IProducer interface {
}
type SyncProducer struct {
}
type AsyncProducer struct {
}
type Producer struct {
service.Module
sarama.SyncProducer
sarama.AsyncProducer
}
// NewProducerConfig creates a new producer config.
// kafkaVersion: Kafka version
// returnErr / returnSucc: whether errors and successes are returned on the producer channels
// requiredAcks: -1 full-sync ack, strong reliability (all leaders and followers have received the message) #WaitForAll; 1 leader ack, the default (only the leader responds) #WaitForLocal; 0 no ack, highest throughput (result is ignored) #NoResponse
// Idempotent: ensures each message is written to a replica exactly once; when true the producer guarantees ordered, exactly-once production
// partitioner: partitioner constructor used to choose the partition a message is sent to; by default the message key is hashed
func NewProducerConfig(kafkaVersion string, returnErr bool, returnSucc bool, requiredAcks sarama.RequiredAcks, Idempotent bool,
partitioner sarama.PartitionerConstructor) (*sarama.Config, error) {
config := sarama.NewConfig()
var err error
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
if err != nil {
return nil, err
}
config.Producer.Return.Errors = returnErr
config.Producer.Return.Successes = returnSucc
config.Producer.RequiredAcks = requiredAcks
config.Producer.Partitioner = partitioner
config.Producer.Timeout = 10 * time.Second
config.Producer.Idempotent = Idempotent
if Idempotent == true {
config.Net.MaxOpenRequests = 1
}
return config, nil
}
func (p *Producer) SyncSetup(addr []string, config *sarama.Config) error {
var err error
p.SyncProducer, err = sarama.NewSyncProducer(addr, config)
if err != nil {
return err
}
return nil
}
func (p *Producer) ASyncSetup(addr []string, config *sarama.Config) error {
var err error
p.AsyncProducer, err = sarama.NewAsyncProducer(addr, config)
if err != nil {
return err
}
go func() {
p.asyncRun()
}()
return nil
}
func (p *Producer) asyncRun() {
for {
select {
case sm := <-p.Successes():
if sm.Metadata == nil {
break
}
asyncReturn := sm.Metadata.(*AsyncReturn)
asyncReturn.chanReturn <- asyncReturn
case em := <-p.Errors():
log.Error("async kafkamodule error", log.ErrorAttr("err", em.Err))
if em.Msg.Metadata == nil {
break
}
asyncReturn := em.Msg.Metadata.(*AsyncReturn)
asyncReturn.Err = em.Err
asyncReturn.chanReturn <- asyncReturn
}
}
}
type AsyncReturn struct {
Msg *sarama.ProducerMessage
Err error
chanReturn chan *AsyncReturn
}
func (ar *AsyncReturn) WaitOk(ctx context.Context) (*sarama.ProducerMessage, error) {
asyncReturn := ar.Msg.Metadata.(*AsyncReturn)
select {
case <-asyncReturn.chanReturn:
return asyncReturn.Msg, asyncReturn.Err
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (p *Producer) AsyncSendMessage(msg *sarama.ProducerMessage) *AsyncReturn {
asyncReturn := AsyncReturn{Msg: msg, chanReturn: make(chan *AsyncReturn, 1)}
msg.Metadata = &asyncReturn
p.AsyncProducer.Input() <- msg
return &asyncReturn
}
func (p *Producer) AsyncPushMessage(msg *sarama.ProducerMessage) {
p.AsyncProducer.Input() <- msg
}
func (p *Producer) Close() {
if p.SyncProducer != nil {
p.SyncProducer.Close()
p.SyncProducer = nil
}
if p.AsyncProducer != nil {
p.AsyncProducer.Close()
p.AsyncProducer = nil
}
}
func (p *Producer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
return p.SyncProducer.SendMessage(msg)
}
func (p *Producer) SendMessages(msgs []*sarama.ProducerMessage) error {
return p.SyncProducer.SendMessages(msgs)
}
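For the async path, AsyncSendMessage attaches an AsyncReturn to the message metadata and asyncRun routes the success or error back to its channel, so WaitOk turns the asynchronous producer into a per-message future. A hedged sketch, assuming ASyncSetup has already been called on the Producer (topic, key and the import path are placeholders):

package example

import (
    "context"
    "time"

    "github.com/IBM/sarama"
    "github.com/duanhf2012/origin/v2/log"
    "github.com/duanhf2012/origin/v2/sysmodule/kafkamodule" // assumed import path
)

func sendWithDeadline(p *kafkamodule.Producer) {
    msg := &sarama.ProducerMessage{
        Topic: "player_events",
        Key:   sarama.StringEncoder("1001"),
        Value: sarama.StringEncoder(`{"event":"login"}`),
    }
    ret := p.AsyncSendMessage(msg)

    // WaitOk blocks until asyncRun reports success or failure for this
    // message, or until the context deadline expires.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    if _, err := ret.WaitOk(ctx); err != nil {
        log.Error("async send failed or timed out", log.ErrorAttr("err", err))
    }
}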

View File

@@ -0,0 +1,151 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"testing"
"time"
)
// Notes on the individual parameters and mechanism names: https://blog.csdn.net/u013311345/article/details/129217728
type MsgReceiver struct {
t *testing.T
}
func (mr *MsgReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
for _, m := range msgs {
mr.t.Logf("time:%s, topic:%s, partition:%d, offset:%d, key:%s, value:%s", time.Now().Format("2006-01-02 15:04:05.000"), m.Topic, m.Partition, m.Offset, m.Key, string(m.Value))
}
return true
}
var addr = []string{"192.168.13.24:9092", "192.168.13.24:9093", "192.168.13.24:9094", "192.168.13.24:9095"}
var topicName = []string{"test_topic_1", "test_topic_2"}
var kafkaVersion = "3.3.1"
func producer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 2, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.SyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
//msgs := make([]*sarama.ProducerMessage, 0, 20000)
for i := 0; i < 20000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
pd.SendMessage(&msg)
//msgs = append(msgs, &msg)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
t.Log(time.Now().Sub(now).Milliseconds())
pd.Close()
}
func producer_async(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 10, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.ASyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
msgs := make([]*AsyncReturn, 0, 20000)
for i := 0; i < 200000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
r := pd.AsyncSendMessage(&msg)
msgs = append(msgs, r)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
for _, r := range msgs {
r.WaitOk(context.Background())
//t.Log(m, e)
}
t.Log(time.Now().Sub(now).Milliseconds())
time.Sleep(1000 * time.Second)
pd.Close()
}
func consumer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 10, 2, false)
t.Log(err)
}
}
var cg ConsumerGroup
cfg, err := NewConsumerConfig(kafkaVersion, "sticky", sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
err = cg.Setup(addr, topicName, "test_groupId", cfg, 50*time.Second, 10, &MsgReceiver{t: t})
t.Log(err)
time.Sleep(10000 * time.Second)
cg.Close()
}
func TestConsumerAndProducer(t *testing.T) {
producer_async(t)
//go producer(t)
//producer(t)
//consumer(t)
}

View File

@@ -0,0 +1,7 @@
package kafkamodule
type Sasl struct {
UserName string `json:"UserName"`
Passwd string `json:"Passwd"`
InstanceId string `json:"InstanceId"`
}

View File

@@ -5,7 +5,6 @@ import (
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/bsonx"
"time"
)
@@ -48,6 +47,10 @@ func (mm *MongoModule) Start() error {
return nil
}
func (mm *MongoModule) Stop() error {
return mm.client.Disconnect(context.Background())
}
func (mm *MongoModule) TakeSession() Session {
return Session{Client: mm.client, maxOperatorTimeOut: mm.maxOperatorTimeOut}
}
@@ -72,12 +75,12 @@ func (s *Session) NextSeq(db string, collection string, id interface{}) (int, er
return res.Seq, err
}
// EnsureIndex indexKeys[index][key fields of each index]
func (s *Session) EnsureIndex(db string, collection string, indexKeys [][]string, bBackground bool, sparse bool, asc bool) error {
return s.ensureIndex(db, collection, indexKeys, bBackground, false, sparse, asc)
}
// EnsureUniqueIndex indexKeys[index][key fields of each index]
func (s *Session) EnsureUniqueIndex(db string, collection string, indexKeys [][]string, bBackground bool, sparse bool, asc bool) error {
return s.ensureIndex(db, collection, indexKeys, bBackground, true, sparse, asc)
}
@@ -86,12 +89,12 @@ func (s *Session) EnsureUniqueIndex(db string, collection string, indexKeys [][]
func (s *Session) ensureIndex(db string, collection string, indexKeys [][]string, bBackground bool, unique bool, sparse bool, asc bool) error {
var indexes []mongo.IndexModel
for _, keys := range indexKeys {
keysDoc := bson.D{}
for _, key := range keys {
if asc {
keysDoc = append(keysDoc, bson.E{Key: key, Value: 1})
} else {
keysDoc = append(keysDoc, bson.E{Key: key, Value: -1})
}
}
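With the deprecated bsonx package gone, index keys are assembled as plain bson.D documents; EnsureIndex still takes a slice of key lists, one inner slice per (possibly compound) index. A small hedged example of the call (database, collection, field names and import path are placeholders):

package example

import (
    "github.com/duanhf2012/origin/v2/log"
    "github.com/duanhf2012/origin/v2/sysmodule/mongomodule" // assumed import path
)

func ensurePlayerIndexes(mm *mongomodule.MongoModule) {
    // one single-field index on playerId and one compound index on {zoneId, createTime}
    indexKeys := [][]string{
        {"playerId"},
        {"zoneId", "createTime"},
    }
    s := mm.TakeSession()
    // background build, not sparse, ascending order
    if err := s.EnsureIndex("game", "t_player", indexKeys, true, false, true); err != nil {
        log.Error("ensure index failed", log.ErrorAttr("err", err))
    }
}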

View File

@@ -1,181 +0,0 @@
package mongomodule
import (
"github.com/duanhf2012/origin/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"sync"
"time"
"container/heap"
_ "gopkg.in/mgo.v2"
)
// session
type Session struct {
*mgo.Session
ref int
index int
}
type SessionHeap []*Session
func (h SessionHeap) Len() int {
return len(h)
}
func (h SessionHeap) Less(i, j int) bool {
return h[i].ref < h[j].ref
}
func (h SessionHeap) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
h[i].index = i
h[j].index = j
}
func (h *SessionHeap) Push(s interface{}) {
s.(*Session).index = len(*h)
*h = append(*h, s.(*Session))
}
func (h *SessionHeap) Pop() interface{} {
l := len(*h)
s := (*h)[l-1]
s.index = -1
*h = (*h)[:l-1]
return s
}
type DialContext struct {
sync.Mutex
sessions SessionHeap
}
type MongoModule struct {
dailContext *DialContext
}
func (slf *MongoModule) Init(url string,sessionNum uint32,dialTimeout time.Duration, timeout time.Duration) error {
var err error
slf.dailContext, err = dialWithTimeout(url, sessionNum, dialTimeout, timeout)
return err
}
func (slf *MongoModule) Ref() *Session{
return slf.dailContext.Ref()
}
func (slf *MongoModule) UnRef(s *Session) {
slf.dailContext.UnRef(s)
}
// goroutine safe
func dialWithTimeout(url string, sessionNum uint32, dialTimeout time.Duration, timeout time.Duration) (*DialContext, error) {
if sessionNum <= 0 {
sessionNum = 100
log.Release("invalid sessionNum, reset to %v", sessionNum)
}
s, err := mgo.DialWithTimeout(url, dialTimeout)
if err != nil {
return nil, err
}
s.SetMode(mgo.Strong,true)
s.SetSyncTimeout(timeout)
s.SetSocketTimeout(timeout)
c := new(DialContext)
// sessions
c.sessions = make(SessionHeap, sessionNum)
c.sessions[0] = &Session{s, 0, 0}
for i := 1; i < int(sessionNum); i++ {
c.sessions[i] = &Session{s.New(), 0, i}
}
heap.Init(&c.sessions)
return c, nil
}
// goroutine safe
func (c *DialContext) Close() {
c.Lock()
for _, s := range c.sessions {
s.Close()
}
c.Unlock()
}
// goroutine safe
func (c *DialContext) Ref() *Session {
c.Lock()
s := c.sessions[0]
if s.ref == 0 {
s.Refresh()
}
s.ref++
heap.Fix(&c.sessions, 0)
c.Unlock()
return s
}
// goroutine safe
func (c *DialContext) UnRef(s *Session) {
if s == nil {
return
}
c.Lock()
s.ref--
heap.Fix(&c.sessions, s.index)
c.Unlock()
}
// goroutine safe
func (s *Session) EnsureCounter(db string, collection string, id string) error {
err := s.DB(db).C(collection).Insert(bson.M{
"_id": id,
"seq": 0,
})
if mgo.IsDup(err) {
return nil
} else {
return err
}
}
// goroutine safe
func (s *Session) NextSeq(db string, collection string, id string) (int, error) {
var res struct {
Seq int
}
_, err := s.DB(db).C(collection).FindId(id).Apply(mgo.Change{
Update: bson.M{"$inc": bson.M{"seq": 1}},
ReturnNew: true,
}, &res)
return res.Seq, err
}
// goroutine safe
func (s *Session) EnsureIndex(db string, collection string, key []string, bBackground bool) error {
return s.DB(db).C(collection).EnsureIndex(mgo.Index{
Key: key,
Unique: false,
Sparse: true,
Background: bBackground,
})
}
// goroutine safe
func (s *Session) EnsureUniqueIndex(db string, collection string, key []string, bBackground bool) error {
return s.DB(db).C(collection).EnsureIndex(mgo.Index{
Key: key,
Unique: true,
Sparse: true,
Background: bBackground,
})
}

View File

@@ -1,95 +0,0 @@
package mongomodule
import (
"fmt"
_ "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"testing"
"time"
)
type Student struct {
ID bson.ObjectId `bson:"_id"`
Name string `bson: "name"`
Age int `bson: "age"`
Sid string `bson: "sid"`
Status int `bson: "status"`
}
type StudentName struct {
Name string `bson: "name"`
}
func Test_Example(t *testing.T) {
module:=MongoModule{}
module.Init("mongodb://admin:123456@192.168.2.119:27017",100, 5*time.Second,5*time.Second)
// take session
s := module.Take()
c := s.DB("test2").C("t_student")
//2. define the data objects
insertData := Student{
ID:bson.NewObjectId(),
Name: "seeta11",
Age: 35, //*^_^*
Sid: "s20180907",
Status: 1,
}
updateData := Student{
Name: "seeta11",
Age: 18,
Sid: "s20180907",
Status: 1,
}
//3. insert data
err := c.Insert(&insertData)
//4. query data
selector := bson.M{"_id":bson.ObjectIdHex("5f25303e999c622d361989b0")}
m:=Student{}
err = c.Find(selector).One(&m)
//5. update data
//selector2 := bson.M{"_id":bson.ObjectIdHex("5f25303e999c622d361989b0")}
updateData.ID = bson.ObjectIdHex("5f25303e999c622d361989b0")
err = c.UpdateId(bson.ObjectIdHex("5f25303e999c622d361989b0"),&updateData)
if err != nil {
fmt.Print(err)
}
//6. delete data
err = c.RemoveId(bson.ObjectIdHex("5f252f09999c622d36198951"))
if err != nil {
fmt.Print(err)
}
//7. auto-increment sequence
s.EnsureCounter("test2","t_student","5f252f09999c622d36198951")
for i := 0; i < 3; i++ {
id, err := s.NextSeq("test2", "t_student", "5f252f09999c622d36198951")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(id)
}
//8. using $setOnInsert
info,uErr := c.Upsert(bson.M{"_id":bson.ObjectIdHex("5f252f09999c622d36198951")},bson.M{
"$setOnInsert":bson.M{"Name":"setoninsert","Age":55}})
//9. update part of the numeric fields
selector1 := bson.M{"_id":bson.ObjectIdHex("60473de655f1012e7453b369")}
update1 := bson.M{"$set":bson.M{"name":"xxxxx","age":1111}}
c.Update(selector1,update1)
fmt.Println(info,uErr)
}

View File

@@ -4,7 +4,7 @@ import (
"database/sql"
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/log"
"net/url"
"reflect"
"strconv"
@@ -12,15 +12,15 @@ import (
"sync"
"time"
"github.com/duanhf2012/origin/v2/service"
_ "github.com/go-sql-driver/mysql"
)
type SyncFun func()
type DBExecute struct {
syncExecuteFun chan SyncFun
syncExecuteExit chan bool
}
type PingExecute struct {
@@ -28,22 +28,21 @@ type PingExecute struct {
pintExit chan bool
}
// DBModule ...
type MySQLModule struct {
service.Module
db *sql.DB
url string
username string
password string
dbname string
slowDuration time.Duration
pingCoroutine PingExecute
waitGroup sync.WaitGroup
}
// Tx ...
type Tx struct {
tx *sql.Tx
slowDuration time.Duration
}
@@ -53,7 +52,7 @@ type DBResult struct {
RowsAffected int64
rowNum int
RowInfo map[string][]interface{} //map[fieldName][row]sql.NullString
}
type DataSetList struct {
@@ -63,19 +62,17 @@ type DataSetList struct {
blur bool
}
type dbControl interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
}
func (m *MySQLModule) Init(url string, userName string, password string, dbname string, maxConn int) error {
m.url = url
m.username = userName
m.password = password
m.dbname = dbname
m.pingCoroutine = PingExecute{tickerPing: time.NewTicker(5 * time.Second), pintExit: make(chan bool, 1)}
return m.connect(maxConn)
}
@@ -85,12 +82,12 @@ func (m *MySQLModule) SetQuerySlowTime(slowDuration time.Duration) {
}
func (m *MySQLModule) Query(strQuery string, args ...interface{}) (*DataSetList, error) {
return query(m.slowDuration, m.db, strQuery, args...)
}
// Exec ...
func (m *MySQLModule) Exec(strSql string, args ...interface{}) (*DBResult, error) {
return exec(m.slowDuration, m.db, strSql, args...)
}
// Begin starts a transaction.
@@ -116,14 +113,14 @@ func (slf *Tx) Commit() error {
return slf.tx.Commit()
}
// Query executes a query that returns rows.
func (slf *Tx) Query(strQuery string, args ...interface{}) (*DataSetList, error) {
return query(slf.slowDuration, slf.tx, strQuery, args...)
}
// Exec executes a query that doesn't return rows.
func (slf *Tx) Exec(strSql string, args ...interface{}) (*DBResult, error) {
return exec(slf.slowDuration, slf.tx, strSql, args...)
}
// Connect ...
@@ -168,7 +165,7 @@ func (m *MySQLModule) runPing() {
}
}
func checkArgs(args ...interface{}) error {
for _, val := range args {
if reflect.TypeOf(val).Kind() == reflect.String {
retVal := val.(string)
@@ -211,33 +208,33 @@ func checkArgs(args ...interface{}) error {
return nil
}
func checkSlow(slowDuration time.Duration, Time time.Duration) bool {
if slowDuration != 0 && Time >= slowDuration {
return true
}
return false
}
func query(slowDuration time.Duration, db dbControl, strQuery string, args ...interface{}) (*DataSetList, error) {
datasetList := DataSetList{}
datasetList.tag = "json"
datasetList.blur = true
if checkArgs(args) != nil {
log.Error("CheckArgs is error :%s", strQuery)
return &datasetList, fmt.Errorf("checkArgs is error")
}
if db == nil {
log.Error("cannot connect database:%s", strQuery)
return &datasetList, fmt.Errorf("cannot connect database")
}
TimeFuncStart := time.Now()
rows, err := db.Query(strQuery, args...)
timeFuncPass := time.Since(TimeFuncStart)
if checkSlow(slowDuration, timeFuncPass) {
log.Error("DBModule QueryEx Time %s , Query :%s , args :%+v", timeFuncPass, strQuery, args)
}
if err != nil {
@@ -256,7 +253,7 @@ func query(slowDuration time.Duration,db dbControl,strQuery string, args ...inte
if dbResult.RowInfo == nil {
dbResult.RowInfo = make(map[string][]interface{})
}
//RowInfo map[string][][]sql.NullString //map[fieldname][row][column]sql.NullString
colField, err := rows.Columns()
if err != nil {
return &datasetList, err
@@ -268,10 +265,10 @@ func query(slowDuration time.Duration,db dbControl,strQuery string, args ...inte
}
rows.Scan(valuePtrs...)
for idx, fieldName := range colField {
fieldRowData := dbResult.RowInfo[strings.ToLower(fieldName)]
fieldRowData = append(fieldRowData, valuePtrs[idx])
dbResult.RowInfo[strings.ToLower(fieldName)] = fieldRowData
}
dbResult.rowNum += 1
}
@@ -282,7 +279,7 @@ func query(slowDuration time.Duration,db dbControl,strQuery string, args ...inte
if hasRet == false {
if rows.Err() != nil {
log.Error("Query:%s(%+v)", strQuery, rows)
}
break
}
@@ -291,22 +288,22 @@ func query(slowDuration time.Duration,db dbControl,strQuery string, args ...inte
return &datasetList, nil
}
func exec(slowDuration time.Duration, db dbControl, strSql string, args ...interface{}) (*DBResult, error) {
ret := &DBResult{}
if db == nil {
log.Error("cannot connect database:%s", strSql)
return ret, fmt.Errorf("cannot connect database")
}
if checkArgs(args) != nil {
log.Error("CheckArgs is error :%s", strSql)
return ret, fmt.Errorf("checkArgs is error")
}
TimeFuncStart := time.Now()
res, err := db.Exec(strSql, args...)
timeFuncPass := time.Since(TimeFuncStart)
if checkSlow(slowDuration, timeFuncPass) {
log.Error("DBModule QueryEx Time %s , Query :%s , args :%+v", timeFuncPass, strSql, args)
}
if err != nil {
@@ -361,21 +358,21 @@ func (ds *DataSetList) slice2interface(in interface{}) error {
}
v := reflect.ValueOf(in).Elem()
newV := reflect.MakeSlice(v.Type(), 0, length)
v.Set(newV)
v.SetLen(length)
for i := 0; i < length; i++ {
idxV := v.Index(i)
if idxV.Kind() == reflect.Ptr {
newObj := reflect.New(idxV.Type().Elem())
v.Index(i).Set(newObj)
idxV = newObj
} else {
idxV = idxV.Addr()
}
err := ds.rowData2interface(i, ds.dataSetList[ds.currentDataSetIdx].RowInfo, idxV)
if err != nil {
return err
}
@@ -390,7 +387,7 @@ func (ds *DataSetList) rowData2interface(rowIdx int, m map[string][]interface{},
typ := t.Elem()
if !val.IsValid() {
return errors.New("incorrect data type")
}
for i := 0; i < val.NumField(); i++ {
@@ -402,11 +399,11 @@ func (ds *DataSetList) rowData2interface(rowIdx int, m map[string][]interface{},
}
if tag != "" && tag != "-" {
vTag := strings.ToLower(tag)
columnData, ok := m[vTag]
if ok == false {
if !ds.blur {
return fmt.Errorf("cannot find filed name %s", vTag)
}
continue
}
@@ -416,12 +413,12 @@ func (ds *DataSetList) rowData2interface(rowIdx int, m map[string][]interface{},
meta := columnData[rowIdx].(*sql.NullString)
if !ok {
if !ds.blur {
return fmt.Errorf("no corresponding field was found in the result set %s", tag)
}
continue
}
if !value.CanSet() {
return errors.New("struct fields do not have read or write permissions")
}
if meta.Valid == false {
@@ -460,7 +457,7 @@ func (ds *DataSetList) rowData2interface(rowIdx int, m map[string][]interface{},
}
value.SetBool(b)
default:
return errors.New("the database map has unrecognized data types")
}
}
}
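Apart from gofmt-style cleanups and lower-cased error strings, the MySQLModule API visible in this diff is unchanged: Init opens the pool, SetQuerySlowTime enables slow-query logging, Query returns a DataSetList and Exec a DBResult. A minimal hedged sketch of those calls (connection parameters, table name and import path are placeholders):

package example

import (
    "time"

    "github.com/duanhf2012/origin/v2/log"
    "github.com/duanhf2012/origin/v2/sysmodule/mysqlmodule" // assumed import path
)

func setupDB() *mysqlmodule.MySQLModule {
    var db mysqlmodule.MySQLModule
    if err := db.Init("127.0.0.1:3306", "root", "123456", "game", 50); err != nil {
        log.Fatal("mysql init failed", log.ErrorAttr("err", err))
    }
    db.SetQuerySlowTime(200 * time.Millisecond) // log queries slower than 200ms

    // Exec for writes, Query for reads; both validate args and report slow calls
    if _, err := db.Exec("update t_player set level = level + 1 where id = ?", 1001); err != nil {
        log.Error("exec failed", log.ErrorAttr("err", err))
    }
    return &db
}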

View File

@@ -0,0 +1,313 @@
package ginmodule
import (
"context"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"github.com/gin-gonic/gin"
"io"
"net/http"
"strings"
"time"
)
type IGinProcessor interface {
Process(data *gin.Context) (*gin.Context, error)
}
type GinModule struct {
service.Module
*gin.Engine
srv *http.Server
listenAddr string
handleTimeout time.Duration
processor []IGinProcessor
}
func (gm *GinModule) Init(addr string, handleTimeout time.Duration, engine *gin.Engine) {
gm.listenAddr = addr
gm.handleTimeout = handleTimeout
gm.Engine = engine
}
func (gm *GinModule) SetupDataProcessor(processor ...IGinProcessor) {
gm.processor = processor
}
func (gm *GinModule) AppendDataProcessor(processor ...IGinProcessor) {
gm.processor = append(gm.processor, processor...)
}
func (gm *GinModule) OnInit() error {
if gm.Engine == nil {
gm.Engine = gin.Default()
}
gm.srv = &http.Server{
Addr: gm.listenAddr,
Handler: gm.Engine,
}
gm.Engine.Use(Logger())
gm.Engine.Use(gin.Recovery())
gm.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_Gin_Event, gm.GetEventHandler(), gm.eventHandler)
return nil
}
func (gm *GinModule) eventHandler(ev event.IEvent) {
ginEvent := ev.(*GinEvent)
for _, handler := range ginEvent.handlersChain {
handler(&ginEvent.c)
}
//ginEvent.chanWait <- struct{}{}
}
func (gm *GinModule) Start() {
gm.srv.Addr = gm.listenAddr
log.Info("http start listen", log.Any("addr", gm.listenAddr))
go func() {
err := gm.srv.ListenAndServe()
if err != nil {
log.Error("ListenAndServe error", log.Any("error", err.Error()))
}
}()
}
func (gm *GinModule) StartTLS(certFile, keyFile string) {
log.Info("http start listen", log.Any("addr", gm.listenAddr))
go func() {
err := gm.srv.ListenAndServeTLS(certFile, keyFile)
if err != nil {
log.Fatal("ListenAndServeTLS error", log.Any("error", err.Error()))
}
}()
}
func (gm *GinModule) Stop(ctx context.Context) {
if err := gm.srv.Shutdown(ctx); err != nil {
log.Error("Server Shutdown", log.Any("error", err))
}
}
type SafeContext struct {
*gin.Context
chanWait chan struct{}
}
func (c *SafeContext) JSONAndDone(code int, obj any) {
c.Context.JSON(code, obj)
c.Done()
}
func (c *SafeContext) AsciiJSONAndDone(code int, obj any) {
c.Context.AsciiJSON(code, obj)
c.Done()
}
func (c *SafeContext) PureJSONAndDone(code int, obj any) {
c.Context.PureJSON(code, obj)
c.Done()
}
func (c *SafeContext) XMLAndDone(code int, obj any) {
c.Context.XML(code, obj)
c.Done()
}
func (c *SafeContext) YAMLAndDone(code int, obj any) {
c.Context.YAML(code, obj)
c.Done()
}
func (c *SafeContext) TOMLAndDone(code int, obj any) {
c.Context.TOML(code, obj)
c.Done()
}
func (c *SafeContext) ProtoBufAndDone(code int, obj any) {
c.Context.ProtoBuf(code, obj)
c.Done()
}
func (c *SafeContext) StringAndDone(code int, format string, values ...any) {
c.Context.String(code, format, values...)
c.Done()
}
func (c *SafeContext) RedirectAndDone(code int, location string) {
c.Context.Redirect(code, location)
c.Done()
}
func (c *SafeContext) DataAndDone(code int, contentType string, data []byte) {
c.Context.Data(code, contentType, data)
c.Done()
}
func (c *SafeContext) DataFromReaderAndDone(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string) {
c.DataFromReader(code, contentLength, contentType, reader, extraHeaders)
c.Done()
}
func (c *SafeContext) HTMLAndDone(code int, name string, obj any) {
c.Context.HTML(code, name, obj)
c.Done()
}
func (c *SafeContext) IndentedJSONAndDone(code int, obj any) {
c.Context.IndentedJSON(code, obj)
c.Done()
}
func (c *SafeContext) SecureJSONAndDone(code int, obj any) {
c.Context.SecureJSON(code, obj)
c.Done()
}
func (c *SafeContext) JSONPAndDone(code int, obj any) {
c.Context.JSONP(code, obj)
c.Done()
}
func (c *SafeContext) Done() {
c.chanWait <- struct{}{}
}
type GinEvent struct {
handlersChain []SafeHandlerFunc
c SafeContext
}
type SafeHandlerFunc func(*SafeContext)
func (ge *GinEvent) GetEventType() event.EventType {
return event.Sys_Event_Gin_Event
}
func (gm *GinModule) handleMethod(httpMethod, relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.Engine.Handle(httpMethod, relativePath, func(c *gin.Context) {
for _, p := range gm.processor {
_, err := p.Process(c)
if err != nil {
return
}
}
var ev GinEvent
chanWait := make(chan struct{}, 2)
ev.c.chanWait = chanWait
ev.handlersChain = handlers
ev.c.Context = c
gm.NotifyEvent(&ev)
ctx, cancel := context.WithTimeout(context.Background(), gm.handleTimeout)
defer cancel()
select {
case <-ctx.Done():
log.Error("GinModule process timeout", log.Any("path", c.Request.URL.Path))
c.AbortWithStatus(http.StatusRequestTimeout)
case <-chanWait:
}
})
}
// GET registers a route whose handlers run on the gin goroutine
func (gm *GinModule) GET(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.GET(relativePath, handlers...)
}
// POST registers a route whose handlers run on the gin goroutine
func (gm *GinModule) POST(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.POST(relativePath, handlers...)
}
// DELETE registers a route whose handlers run on the gin goroutine
func (gm *GinModule) DELETE(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.DELETE(relativePath, handlers...)
}
// PATCH registers a route whose handlers run on the gin goroutine
func (gm *GinModule) PATCH(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.PATCH(relativePath, handlers...)
}
// Put registers an HTTP PUT route whose handlers run on the gin goroutine
func (gm *GinModule) Put(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.PUT(relativePath, handlers...)
}
// SafeGET registers a route whose handlers run on the service goroutine
func (gm *GinModule) SafeGET(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodGet, relativePath, handlers...)
}
// SafePOST registers a route whose handlers run on the service goroutine
func (gm *GinModule) SafePOST(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPost, relativePath, handlers...)
}
// SafeDELETE registers a route whose handlers run on the service goroutine
func (gm *GinModule) SafeDELETE(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodDelete, relativePath, handlers...)
}
// SafePATCH registers a route whose handlers run on the service goroutine
func (gm *GinModule) SafePATCH(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPatch, relativePath, handlers...)
}
// SafePut registers an HTTP PUT route whose handlers run on the service goroutine
func (gm *GinModule) SafePut(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPut, relativePath, handlers...)
}
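The comments above mark the key difference between the two route families: GET/POST/DELETE/PATCH/Put hand the request straight to gin, so their handlers run on gin's own goroutine, while the Safe* variants go through handleMethod, which publishes a GinEvent to the service and blocks the gin goroutine on chanWait until a *AndDone reply arrives (or handleTimeout expires and a 408 is returned). A minimal Safe* handler might look like the sketch below; the path, request shape and the gm variable are illustrative assumptions.

// Safe* handlers run on the service goroutine and must end with exactly one
// *AndDone call, otherwise the request only completes after handleTimeout.
gm.SafePOST("/profile", func(c *ginmodule.SafeContext) {
	var req struct {
		UserId string `json:"userId"`
	}
	if err := c.ShouldBindJSON(&req); err != nil { // gin.Context methods are promoted
		c.JSONAndDone(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Service state may be read or written here without extra locking, since
	// this closure executes on the owning service's goroutine.
	c.JSONAndDone(http.StatusOK, gin.H{"userId": req.UserId})
})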
func GetIPWithProxyHeaders(c *gin.Context) string {
// try to read the real client IP from the X-Real-IP header first
ip := c.GetHeader("X-Real-IP")
// if X-Real-IP is absent, fall back to X-Forwarded-For
if ip == "" {
ip = c.GetHeader("X-Forwarded-For")
}
// if neither header is present, fall back to gin's default ClientIP
if ip == "" {
ip = c.ClientIP()
}
return ip
}
func GetIPWithValidatedProxyHeaders(c *gin.Context) string {
// read the proxy headers: prefer X-Real-IP, then X-Forwarded-For
proxyHeaders := c.GetHeader("X-Real-IP")
if proxyHeaders == "" {
proxyHeaders = c.GetHeader("X-Forwarded-For")
}
// X-Forwarded-For may hold a comma-separated list; the first entry is the original client
ips := strings.Split(proxyHeaders, ",")
ip := strings.TrimSpace(ips[0])
// use the extracted IP only when it is well formed, otherwise fall back to gin's ClientIP
if isValidIP(ip) {
return ip
}
return c.ClientIP()
}
// isValidIP reports whether ip looks like a usable IP value.
// This is only a placeholder check (non-empty); a real implementation
// could parse the value with net.ParseIP or a regular expression.
func isValidIP(ip string) bool {
if ip == "" {
return false
}
return true
}
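Both helpers above only make sense when the process sits behind a reverse proxy that sets X-Real-IP or X-Forwarded-For; a client talking to the server directly can put anything into those headers. A hedged usage sketch (route path and handler body are illustrative):

// Reporting the caller's address from inside an ordinary gin handler.
gm.GET("/whoami", func(c *gin.Context) {
	// Trust these headers only when a proxy you control sets them.
	c.String(http.StatusOK, GetIPWithProxyHeaders(c))
})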

View File

@@ -0,0 +1,79 @@
package ginmodule
import (
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/gin-gonic/gin"
"time"
)
// Logger is a custom logging middleware for gin
func Logger() gin.HandlerFunc {
return func(c *gin.Context) {
// record the start time before the request is handled
startTime := time.Now()
// run the remaining handlers for this request
c.Next()
// end time
endTime := time.Now()
// time spent handling the request
latencyTime := endTime.Sub(startTime)
// client IP
clientIP := c.ClientIP()
// remoteIP := c.RemoteIP()
// request method
reqMethod := c.Request.Method
// request URI
reqUri := c.Request.RequestURI
// request protocol
reqProto := c.Request.Proto
// request referer
reqReferer := c.Request.Referer()
// request user agent
reqUA := c.Request.UserAgent()
// response body length
resLength := c.Writer.Size()
if resLength < 0 {
resLength = 0
}
// response status code
statusCode := c.Writer.Status()
log.Debug(fmt.Sprintf(
"%s | %3d | %s %10s | \033[44;37m%-6s\033[0m %s %s | %10v | \"%s\" \"%s\"",
colorForStatus(statusCode),
statusCode,
colorForStatus(0),
clientIP,
// remoteIP,
reqMethod,
reqUri,
reqProto,
latencyTime,
reqUA,
reqReferer,
))
}
}
// colorForStatus returns the ANSI color code for an HTTP status code
func colorForStatus(code int) string {
switch {
case code >= 200 && code < 300:
return "\033[42;1;37m" // green
case code >= 300 && code < 400:
return "\033[34m" // blue
case code >= 400 && code < 500:
return "\033[33m" // yellow
case code == 0:
return "\033[0m" // cancel
default:
return "\033[31m" // red
}
}
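Logger is ordinary gin middleware, so nothing ties it to GinModule. Note that GinModule.OnInit already calls Engine.Use(Logger()) on whatever engine it ends up with, so it does not need to be added again for engines passed to Init; the sketch below covers the case where the same middleware is wanted on an engine used elsewhere, which is an assumption rather than something this file requires.

// newLoggedEngine builds a bare engine that reuses the middleware above
// (illustrative helper; gin.New() starts without any default middleware).
func newLoggedEngine() *gin.Engine {
	engine := gin.New()
	engine.Use(Logger(), gin.Recovery())
	// Entries go through log.Debug, so they only appear when the origin
	// logger is configured at debug level.
	return engine
}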

View File

@@ -0,0 +1,210 @@
package kcpmodule
import (
"fmt"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/network/processor"
"github.com/duanhf2012/origin/v2/service"
"github.com/xtaci/kcp-go/v5"
"go.mongodb.org/mongo-driver/bson/primitive"
"sync"
)
type KcpModule struct {
service.Module
blockCrypt kcp.BlockCrypt
mapClientLocker sync.RWMutex
mapClient map[string]*Client
process processor.IRawProcessor
kcpServer network.KCPServer
kcpCfg *network.KcpCfg
}
type Client struct {
id string
kcpConn *network.NetConn
kcpModule *KcpModule
}
type KcpPackType int8
const (
KPTConnected KcpPackType = 0
KPTDisConnected KcpPackType = 1
KPTPack KcpPackType = 2
KPTUnknownPack KcpPackType = 3
)
type KcpPack struct {
Type KcpPackType //0 connected, 1 disconnected, 2 data
ClientId string
Data interface{}
RecyclerReaderBytes func(data []byte)
}
func (km *KcpModule) OnInit() error {
if km.kcpCfg == nil || km.process == nil {
return fmt.Errorf("please call the Init function correctly")
}
km.mapClient = make(map[string]*Client, km.kcpCfg.MaxConnNum)
km.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_Kcp, km.GetEventHandler(), km.kcpEventHandler)
km.process.SetByteOrder(km.kcpCfg.LittleEndian)
km.kcpServer.Init(km.kcpCfg)
km.kcpServer.NewAgent = km.NewAgent
return nil
}
func (km *KcpModule) Init(kcpCfg *network.KcpCfg, process processor.IRawProcessor) {
km.kcpCfg = kcpCfg
km.process = process
}
func (km *KcpModule) Start() error {
return km.kcpServer.Start()
}
func (km *KcpModule) kcpEventHandler(ev event.IEvent) {
e := ev.(*event.Event)
switch KcpPackType(e.IntExt[0]) {
case KPTConnected:
km.process.ConnectedRoute(e.StringExt[0])
case KPTDisConnected:
km.process.DisConnectedRoute(e.StringExt[0])
case KPTUnknownPack:
km.process.UnknownMsgRoute(e.StringExt[0], e.Data, e.AnyExt[0].(func(data []byte)))
case KPTPack:
km.process.MsgRoute(e.StringExt[0], e.Data, e.AnyExt[0].(func(data []byte)))
}
event.DeleteEvent(ev)
}
func (km *KcpModule) SetBlob(blockCrypt kcp.BlockCrypt) {
km.blockCrypt = blockCrypt
}
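SetBlob stores a kcp.BlockCrypt for the transport; whether and where the stored crypt is applied is up to network.KCPServer, which lives outside this file. A sketch of enabling AES encryption with kcp-go's helper follows; the key literal is a placeholder and the surrounding OnInit is assumed.

// Assumed to run in the owning service's OnInit, after km.Init(...) and before
// the module is started. kcp.NewAESBlockCrypt comes from github.com/xtaci/kcp-go/v5.
block, err := kcp.NewAESBlockCrypt([]byte("0123456789abcdef")) // 16/24/32-byte key
if err != nil {
	return err
}
km.SetBlob(block)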
func (km *KcpModule) OnConnected(c *Client) {
ev := event.NewEvent()
ev.Type = event.Sys_Event_Kcp
ev.IntExt[0] = int64(KPTConnected)
ev.StringExt[0] = c.id
km.NotifyEvent(ev)
}
func (km *KcpModule) OnClose(c *Client) {
ev := event.NewEvent()
ev.Type = event.Sys_Event_Kcp
ev.IntExt[0] = int64(KPTDisConnected)
ev.StringExt[0] = c.id
km.NotifyEvent(ev)
}
func (km *KcpModule) newClient(conn network.Conn) *Client {
km.mapClientLocker.Lock()
defer km.mapClientLocker.Unlock()
pClient := &Client{kcpConn: conn.(*network.NetConn), id: primitive.NewObjectID().Hex()}
pClient.kcpModule = km
km.mapClient[pClient.id] = pClient
return pClient
}
func (km *KcpModule) GetProcessor() processor.IRawProcessor {
return km.process
}
func (km *KcpModule) SendRawMsg(clientId string, data []byte) error {
km.mapClientLocker.Lock()
client, ok := km.mapClient[clientId]
if ok == false {
km.mapClientLocker.Unlock()
return fmt.Errorf("client %s is disconnect", clientId)
}
km.mapClientLocker.Unlock()
return client.kcpConn.WriteMsg(data)
}
func (km *KcpModule) Close(clientId string) {
km.mapClientLocker.Lock()
client, ok := km.mapClient[clientId]
if ok == false {
km.mapClientLocker.Unlock()
return
}
km.mapClientLocker.Unlock()
client.kcpConn.Close()
}
func (km *KcpModule) GetClientIp(clientId string) string {
km.mapClientLocker.Lock()
defer km.mapClientLocker.Unlock()
client, ok := km.mapClient[clientId]
if ok == false {
return ""
}
remoteAddr := client.kcpConn.RemoteAddr()
if remoteAddr != nil {
return remoteAddr.String()
}
return ""
}
func (km *KcpModule) NewAgent(conn network.Conn) network.Agent {
c := km.newClient(conn)
return c
}
func (c *Client) Run() {
defer func() {
if r := recover(); r != nil {
log.StackError(fmt.Sprint(r))
}
}()
c.kcpModule.OnConnected(c)
for c.kcpConn != nil {
c.kcpConn.SetReadDeadline(*c.kcpModule.kcpCfg.ReadDeadlineMill)
msgBuff, err := c.kcpConn.ReadMsg()
if err != nil {
log.Debug("read client failed", log.ErrorField("error", err), log.String("clientId", c.id))
break
}
data, err := c.kcpModule.process.Unmarshal(c.id, msgBuff)
if err != nil {
ev := event.NewEvent()
ev.Type = event.Sys_Event_Kcp
ev.IntExt[0] = int64(KPTUnknownPack)
ev.StringExt[0] = c.id
ev.Data = msgBuff
ev.AnyExt[0] = c.kcpConn.GetRecyclerReaderBytes()
c.kcpModule.NotifyEvent(ev)
continue
}
ev := event.NewEvent()
ev.Type = event.Sys_Event_Kcp
ev.IntExt[0] = int64(KPTPack)
ev.StringExt[0] = c.id
ev.Data = data
ev.AnyExt[0] = c.kcpConn.GetRecyclerReaderBytes()
c.kcpModule.NotifyEvent(ev)
}
}
func (c *Client) OnClose() {
c.kcpModule.OnClose(c)
c.kcpModule.mapClientLocker.Lock()
delete(c.kcpModule.mapClient, c.id)
c.kcpModule.mapClientLocker.Unlock()
}

View File

@@ -0,0 +1,219 @@
package tcpmodule
import (
"fmt"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/network/processor"
"github.com/duanhf2012/origin/v2/service"
"go.mongodb.org/mongo-driver/bson/primitive"
"sync"
"time"
)
type TcpModule struct {
tcpServer network.TCPServer
service.Module
mapClientLocker sync.RWMutex
mapClient map[string]*Client
process processor.IRawProcessor
tcpCfg *TcpCfg
}
type TcpPackType int8
const (
TPTConnected TcpPackType = 0
TPTDisConnected TcpPackType = 1
TPTPack TcpPackType = 2
TPTUnknownPack TcpPackType = 3
)
type TcpPack struct {
Type TcpPackType //0 connected, 1 disconnected, 2 data
ClientId string
Data interface{}
RecyclerReaderBytes func(data []byte)
}
type Client struct {
id string
tcpConn *network.NetConn
tcpModule *TcpModule
}
type TcpCfg struct {
ListenAddr string //listen address
MaxConnNum int //maximum number of connections
PendingWriteNum int //maximum number of messages queued in the write channel
LittleEndian bool //whether to use little-endian byte order
LenMsgLen int //number of bytes in the length header; must be 1, 2 or 4. With 4 bytes a message can be up to math.MaxUint32 (4GB)
MinMsgLen uint32 //minimum message length
MaxMsgLen uint32 //maximum message length; longer messages are treated as invalid and the connection is closed
ReadDeadlineSecond time.Duration //read timeout, in seconds
WriteDeadlineSecond time.Duration //write timeout, in seconds
}
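The field comments above double as the module's configuration reference. The literal below shows one plausible set of values together with the Init call defined further down in this file; every number is illustrative, tm is assumed to be a *TcpModule, and rawProc stands in for whatever processor.IRawProcessor implementation the project supplies (none is shown here).

// Example wiring for TcpModule (values are illustrative only).
cfg := &TcpCfg{
	ListenAddr:          ":9000",
	MaxConnNum:          4096,
	PendingWriteNum:     100,
	LittleEndian:        true,
	LenMsgLen:           2,     // 2-byte length header: payloads up to 64KB
	MinMsgLen:           2,
	MaxMsgLen:           65535, // must stay within what LenMsgLen can encode
	ReadDeadlineSecond:  30,    // multiplied by time.Second in OnInit
	WriteDeadlineSecond: 30,
}
var rawProc processor.IRawProcessor // assumed to be provided elsewhere
tm.Init(cfg, rawProc)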
func (tm *TcpModule) OnInit() error {
if tm.tcpCfg == nil || tm.process == nil {
return fmt.Errorf("please call the Init function correctly")
}
//2. initialize the network server
tm.tcpServer.Addr = tm.tcpCfg.ListenAddr
tm.tcpServer.MaxConnNum = tm.tcpCfg.MaxConnNum
tm.tcpServer.PendingWriteNum = tm.tcpCfg.PendingWriteNum
tm.tcpServer.LittleEndian = tm.tcpCfg.LittleEndian
tm.tcpServer.LenMsgLen = tm.tcpCfg.LenMsgLen
tm.tcpServer.MinMsgLen = tm.tcpCfg.MinMsgLen
tm.tcpServer.MaxMsgLen = tm.tcpCfg.MaxMsgLen
tm.tcpServer.ReadDeadline = tm.tcpCfg.ReadDeadlineSecond * time.Second
tm.tcpServer.WriteDeadline = tm.tcpCfg.WriteDeadlineSecond * time.Second
tm.mapClient = make(map[string]*Client, tm.tcpServer.MaxConnNum)
tm.tcpServer.NewAgent = tm.NewClient
//3. configure the message parser
tm.process.SetByteOrder(tm.tcpCfg.LittleEndian)
//4. register the network event handler
tm.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_Tcp, tm.GetEventHandler(), tm.tcpEventHandler)
return nil
}
func (tm *TcpModule) Init(tcpCfg *TcpCfg, process processor.IRawProcessor) {
tm.tcpCfg = tcpCfg
tm.process = process
}
func (tm *TcpModule) Start() error {
return tm.tcpServer.Start()
}
func (tm *TcpModule) tcpEventHandler(ev event.IEvent) {
pack := ev.(*event.Event).Data.(TcpPack)
switch pack.Type {
case TPTConnected:
tm.process.ConnectedRoute(pack.ClientId)
case TPTDisConnected:
tm.process.DisConnectedRoute(pack.ClientId)
case TPTUnknownPack:
tm.process.UnknownMsgRoute(pack.ClientId, pack.Data, pack.RecyclerReaderBytes)
case TPTPack:
tm.process.MsgRoute(pack.ClientId, pack.Data, pack.RecyclerReaderBytes)
}
}
func (tm *TcpModule) NewClient(conn network.Conn) network.Agent {
tm.mapClientLocker.Lock()
defer tm.mapClientLocker.Unlock()
clientId := primitive.NewObjectID().Hex()
pClient := &Client{tcpConn: conn.(*network.NetConn), id: clientId}
pClient.tcpModule = tm
tm.mapClient[clientId] = pClient
return pClient
}
func (slf *Client) GetId() string {
return slf.id
}
func (slf *Client) Run() {
defer func() {
if r := recover(); r != nil {
log.StackError(fmt.Sprint(r))
}
}()
slf.tcpModule.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPTConnected}})
for slf.tcpConn != nil {
slf.tcpConn.SetReadDeadline(slf.tcpModule.tcpServer.ReadDeadline)
bytes, err := slf.tcpConn.ReadMsg()
if err != nil {
log.Debug("read client failed", log.ErrorField("error", err), log.String("clientId", slf.id))
break
}
data, err := slf.tcpModule.process.Unmarshal(slf.id, bytes)
if err != nil {
slf.tcpModule.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPTUnknownPack, Data: bytes, RecyclerReaderBytes: slf.tcpConn.GetRecyclerReaderBytes()}})
continue
}
slf.tcpModule.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPTPack, Data: data, RecyclerReaderBytes: slf.tcpConn.GetRecyclerReaderBytes()}})
}
}
func (slf *Client) OnClose() {
slf.tcpModule.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPTDisConnected}})
slf.tcpModule.mapClientLocker.Lock()
defer slf.tcpModule.mapClientLocker.Unlock()
delete(slf.tcpModule.mapClient, slf.GetId())
}
func (tm *TcpModule) SendMsg(clientId string, msg interface{}) error {
tm.mapClientLocker.Lock()
client, ok := tm.mapClient[clientId]
if ok == false {
tm.mapClientLocker.Unlock()
return fmt.Errorf("client %s is disconnect", clientId)
}
tm.mapClientLocker.Unlock()
bytes, err := tm.process.Marshal(clientId, msg)
if err != nil {
return err
}
return client.tcpConn.WriteMsg(bytes)
}
func (tm *TcpModule) Close(clientId string) {
tm.mapClientLocker.Lock()
defer tm.mapClientLocker.Unlock()
client, ok := tm.mapClient[clientId]
if ok == false {
return
}
if client.tcpConn != nil {
client.tcpConn.Close()
}
log.SWarn("close client:", clientId)
return
}
func (tm *TcpModule) GetClientIp(clientId string) string {
tm.mapClientLocker.Lock()
defer tm.mapClientLocker.Unlock()
pClient, ok := tm.mapClient[clientId]
if ok == false {
return ""
}
return pClient.tcpConn.GetRemoteIp()
}
func (tm *TcpModule) SendRawMsg(clientId string, msg []byte) error {
tm.mapClientLocker.Lock()
client, ok := tm.mapClient[clientId]
if ok == false {
tm.mapClientLocker.Unlock()
return fmt.Errorf("client %s is disconnect", clientId)
}
tm.mapClientLocker.Unlock()
return client.tcpConn.WriteMsg(msg)
}
func (tm *TcpModule) GetConnNum() int {
tm.mapClientLocker.Lock()
connNum := len(tm.mapClient)
tm.mapClientLocker.Unlock()
return connNum
}
func (tm *TcpModule) GetProcessor() processor.IRawProcessor {
return tm.process
}
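After OnInit and Start, everything is keyed by the clientId strings that the processor callbacks receive; the module never exposes the underlying connections. A sketch of typical per-client operations, with clientId and payload assumed to be in scope (for example inside a processor's message callback):

// Send, and drop the client if the write fails (illustrative policy).
if err := tm.SendRawMsg(clientId, payload); err != nil {
	log.SWarn("send to client failed: ", clientId, " ip: ", tm.GetClientIp(clientId))
	tm.Close(clientId)
}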

View File

@@ -0,0 +1,203 @@
package wsmodule
import (
"fmt"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/network/processor"
"github.com/duanhf2012/origin/v2/service"
"go.mongodb.org/mongo-driver/bson/primitive"
"sync"
)
type WSModule struct {
service.Module
WSServer network.WSServer
mapClientLocker sync.RWMutex
mapClient map[string]*WSClient
process processor.IRawProcessor
wsCfg *WSCfg
}
type WSClient struct {
id string
wsConn *network.WSConn
wsModule *WSModule
}
type WSCfg struct {
ListenAddr string
MaxConnNum int
PendingWriteNum int
MaxMsgLen uint32
LittleEndian bool //whether to use little-endian byte order
}
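WSCfg mirrors TcpCfg but has no length-prefix settings, since WebSocket frames carry their own framing. An illustrative setup, with ws assumed to be a *WSModule and rawProc again standing in for the project's processor.IRawProcessor implementation:

// Example wiring for WSModule (values are illustrative only).
wsCfg := &WSCfg{
	ListenAddr:      ":9100",
	MaxConnNum:      4096,
	PendingWriteNum: 100,
	MaxMsgLen:       65535,
	LittleEndian:    true,
}
var rawProc processor.IRawProcessor // assumed to be provided elsewhere
ws.Init(wsCfg, rawProc)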
type WSPackType int8
const (
WPTConnected WSPackType = 0
WPTDisConnected WSPackType = 1
WPTPack WSPackType = 2
WPTUnknownPack WSPackType = 3
)
type WSPack struct {
Type WSPackType //0 connected, 1 disconnected, 2 data
MsgProcessor processor.IRawProcessor
ClientId string
Data any
}
func (ws *WSModule) OnInit() error {
if ws.wsCfg == nil || ws.process == nil {
return fmt.Errorf("please call the Init function correctly")
}
ws.WSServer.MaxConnNum = ws.wsCfg.MaxConnNum
ws.WSServer.PendingWriteNum = ws.wsCfg.PendingWriteNum
ws.WSServer.MaxMsgLen = ws.wsCfg.MaxMsgLen
ws.WSServer.Addr = ws.wsCfg.ListenAddr
//3. configure the message parser
ws.process.SetByteOrder(ws.wsCfg.LittleEndian)
ws.mapClient = make(map[string]*WSClient, ws.WSServer.MaxConnNum)
ws.WSServer.NewAgent = ws.NewWSClient
//4. register the network event handler
ws.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_WebSocket, ws.GetEventHandler(), ws.wsEventHandler)
return nil
}
func (ws *WSModule) Init(wsCfg *WSCfg, process processor.IRawProcessor) {
ws.wsCfg = wsCfg
ws.process = process
}
func (ws *WSModule) Start() error {
return ws.WSServer.Start()
}
func (ws *WSModule) wsEventHandler(ev event.IEvent) {
pack := ev.(*event.Event).Data.(*WSPack)
switch pack.Type {
case WPTConnected:
ws.process.ConnectedRoute(pack.ClientId)
case WPTDisConnected:
ws.process.DisConnectedRoute(pack.ClientId)
case WPTUnknownPack:
ws.process.UnknownMsgRoute(pack.ClientId, pack.Data, ws.recyclerReaderBytes)
case WPTPack:
ws.process.MsgRoute(pack.ClientId, pack.Data, ws.recyclerReaderBytes)
}
}
func (ws *WSModule) recyclerReaderBytes([]byte) {
}
func (ws *WSModule) NewWSClient(conn *network.WSConn) network.Agent {
ws.mapClientLocker.Lock()
defer ws.mapClientLocker.Unlock()
pClient := &WSClient{wsConn: conn, id: primitive.NewObjectID().Hex()}
pClient.wsModule = ws
ws.mapClient[pClient.id] = pClient
return pClient
}
func (wc *WSClient) GetId() string {
return wc.id
}
func (wc *WSClient) Run() {
wc.wsModule.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: wc.id, Type: WPTConnected}})
for {
bytes, err := wc.wsConn.ReadMsg()
if err != nil {
log.Debug("read client is error", log.String("clientId", wc.id), log.ErrorField("err", err))
break
}
data, err := wc.wsModule.process.Unmarshal(wc.id, bytes)
if err != nil {
wc.wsModule.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: wc.id, Type: WPTUnknownPack, Data: bytes}})
continue
}
wc.wsModule.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: wc.id, Type: WPTPack, Data: data}})
}
}
func (wc *WSClient) OnClose() {
wc.wsModule.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: wc.id, Type: WPTDisConnected}})
wc.wsModule.mapClientLocker.Lock()
defer wc.wsModule.mapClientLocker.Unlock()
delete(wc.wsModule.mapClient, wc.GetId())
}
func (ws *WSModule) GetProcessor() processor.IRawProcessor {
return ws.process
}
func (ws *WSModule) GetClientIp(clientId string) string {
ws.mapClientLocker.Lock()
defer ws.mapClientLocker.Unlock()
pClient, ok := ws.mapClient[clientId]
if ok == false {
return ""
}
return pClient.wsConn.RemoteAddr().String()
}
func (ws *WSModule) Close(clientId string) {
ws.mapClientLocker.Lock()
defer ws.mapClientLocker.Unlock()
client, ok := ws.mapClient[clientId]
if ok == false {
return
}
if client.wsConn != nil {
client.wsConn.Close()
}
return
}
func (ws *WSModule) SendMsg(clientId string, msg interface{}) error {
ws.mapClientLocker.Lock()
client, ok := ws.mapClient[clientId]
if ok == false {
ws.mapClientLocker.Unlock()
return fmt.Errorf("client %s is disconnect", clientId)
}
ws.mapClientLocker.Unlock()
bytes, err := ws.process.Marshal(clientId, msg)
if err != nil {
return err
}
return client.wsConn.WriteMsg(bytes)
}
func (ws *WSModule) SendRawMsg(clientId string, msg []byte) error {
ws.mapClientLocker.Lock()
client, ok := ws.mapClient[clientId]
if ok == false {
ws.mapClientLocker.Unlock()
return fmt.Errorf("client %s is disconnect", clientId)
}
ws.mapClientLocker.Unlock()
return client.wsConn.WriteMsg(msg)
}
func (ws *WSModule) SetMessageType(messageType int) {
ws.WSServer.SetMessageType(messageType)
}
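SetMessageType simply forwards to the underlying network.WSServer and decides whether outgoing frames are sent as text or binary. The numeric values are assumed here to follow the standard WebSocket opcodes (gorilla/websocket's TextMessage=1 / BinaryMessage=2); check network.WSServer if in doubt.

// Prefer binary frames when the processor marshals raw []byte payloads
// (the constant value is an assumption matching the usual WebSocket opcodes).
const binaryMessage = 2 // websocket.BinaryMessage
ws.SetMessageType(binaryMessage) // call before Start()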

View File

@@ -5,11 +5,11 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"strconv"
"time"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/service"
"github.com/gomodule/redigo/redis"
)
@@ -66,7 +66,7 @@ func (m *RedisModule) Init(redisCfg *ConfigRedis) {
}
c, err := redis.Dial("tcp", redisServer, opt...)
if err != nil {
log.Error("Connect redis fail reason:%v", err)
log.Error("Connect redis fail", log.ErrorField("err",err))
return nil, err
}
@@ -79,7 +79,7 @@ func (m *RedisModule) Init(redisCfg *ConfigRedis) {
}
_, err := c.Do("PING")
if err != nil {
log.Error("Do PING fail reason:%v", err)
log.Error("Do PING fail reason", log.ErrorField("err",err))
return err
}
return err
@@ -90,18 +90,18 @@ func (m *RedisModule) Init(redisCfg *ConfigRedis) {
func (m *RedisModule) getConn() (redis.Conn, error) {
if m.redisPool == nil {
log.Error("Not Init RedisModule")
return nil, fmt.Errorf("Not Init RedisModule")
return nil, fmt.Errorf("not Init RedisModule")
}
conn := m.redisPool.Get()
if conn == nil {
log.Error("Cannot get connection")
return nil, fmt.Errorf("Cannot get connection")
return nil, fmt.Errorf("cannot get connection")
}
if conn.Err() != nil {
err := conn.Err()
if err != nil {
log.Error("Get Conn have error,reason:%v", err)
log.Error("get Conn have error", log.ErrorField("err",err))
}
conn.Close()
return nil, err
@@ -118,7 +118,7 @@ func (m *RedisModule) TestPingRedis() error {
err = m.redisPool.TestOnBorrow(conn, time.Now())
if err != nil {
log.Error("TestOnBorrow fail,reason:%v", err)
log.Error("TestOnBorrow fail", log.ErrorField("err",err))
return err
}
@@ -153,7 +153,7 @@ func (m *RedisModule) SetStringJSONExpire(key interface{}, val interface{}, expi
func (m *RedisModule) setStringByExpire(key, value, expire interface{}) error {
if key == "" {
return errors.New("Key Is Empty")
return errors.New("key is empty")
}
conn, err := m.getConn()
@@ -171,7 +171,7 @@ func (m *RedisModule) setStringByExpire(key, value, expire interface{}) error {
}
if retErr != nil {
log.Error("setStringByExpire fail,reason:%v", retErr)
log.Error("setStringByExpire fail", log.ErrorField("err",retErr))
return retErr
}
@@ -254,7 +254,7 @@ func (m *RedisModule) setMuchStringByExpire(mapInfo map[interface{}]interface{},
}
if serr != nil {
log.Error("setMuchStringByExpire fail,reason:%v", serr)
log.Error("setMuchStringByExpire fail",log.ErrorField("err",serr))
conn.Do("DISCARD")
return serr
} else {
@@ -262,7 +262,7 @@ func (m *RedisModule) setMuchStringByExpire(mapInfo map[interface{}]interface{},
}
if err != nil {
log.Error("setMuchStringByExpire fail,reason:%v", err)
log.Error("setMuchStringByExpire fail", log.ErrorField("err",err))
}
return err
@@ -277,7 +277,7 @@ func (m *RedisModule) GetString(key interface{}) (string, error) {
ret, err := conn.Do("GET", key)
if err != nil {
log.Error("GetString fail,reason:%v", err)
log.Error("GetString fail", log.ErrorField("err",err))
return "", err
}
@@ -298,7 +298,7 @@ func (m *RedisModule) GetStringJSON(key string, st interface{}) error {
ret, err := conn.Do("GET", key)
if err != nil {
log.Error("GetStringJSON fail,reason:%v", err)
log.Error("GetStringJSON fail", log.ErrorField("err",err))
return err
}
@@ -315,7 +315,7 @@ func (m *RedisModule) GetStringJSON(key string, st interface{}) error {
}
if err = json.Unmarshal(str, st); err != nil {
log.Error("GetStringJSON fail json.Unmarshal is error:%s,%s,reason:%v", key, string(str), err)
log.Errorf("GetStringJSON fail json.Unmarshal is error:%s,%s,reason:%v", key, string(str), err)
return err
}
@@ -336,13 +336,13 @@ func (m *RedisModule) GetStringMap(keys []string) (retMap map[string]string, err
// 开始Send数据
err = conn.Send("MULTI")
if err != nil {
log.Error("GetMuchString fail %v", err)
log.Errorf("GetMuchString fail %v", err)
return nil, err
}
for _, val := range keys {
err = conn.Send("GET", val)
if err != nil {
log.Error("GetMuchString fail,reason:%v", err)
log.Errorf("GetMuchString fail,reason:%v", err)
conn.Do("DISCARD")
return nil, err
}
@@ -351,7 +351,7 @@ func (m *RedisModule) GetStringMap(keys []string) (retMap map[string]string, err
// 执行命令
ret, err := conn.Do("EXEC")
if err != nil {
log.Error("GetMuchString fail %v", err)
log.Errorf("GetMuchString fail %v", err)
return
}
@@ -383,7 +383,7 @@ func (m *RedisModule) ExistsKey(key interface{}) (bool, error) {
ret, err := conn.Do("EXISTS", key)
if err != nil {
log.Error("ExistsKey fail, reason:%v", err)
log.Errorf("ExistsKey fail, reason:%v", err)
return false, err
}
retValue, ok := ret.(int64)
@@ -404,7 +404,7 @@ func (m *RedisModule) DelString(key interface{}) error {
ret, err := conn.Do("DEL", key)
if err != nil {
log.Error("DelString fail, reason:%v", err)
log.Errorf("DelString fail, reason:%v", err)
return err
}
@@ -439,7 +439,7 @@ func (m *RedisModule) DelStringKeyList(keys []interface{}) (map[interface{}]bool
for _, val := range keys {
err = conn.Send("DEL", val)
if err != nil {
log.Error("DelMuchString fail,reason:%v", err)
log.Errorf("DelMuchString fail,reason:%v", err)
conn.Do("DISCARD")
return nil, err
}
@@ -448,7 +448,7 @@ func (m *RedisModule) DelStringKeyList(keys []interface{}) (map[interface{}]bool
ret, err := conn.Do("EXEC")
if err != nil {
log.Error("DelMuchString fail,reason:%v", err)
log.Errorf("DelMuchString fail,reason:%v", err)
return nil, err
}
@@ -474,7 +474,7 @@ func (m *RedisModule) DelStringKeyList(keys []interface{}) (map[interface{}]bool
func (m *RedisModule) SetHash(redisKey, hashKey, value interface{}) error {
if redisKey == "" || hashKey == "" {
return errors.New("Key Is Empty")
return errors.New("key is empty")
}
conn, err := m.getConn()
if err != nil {
@@ -484,16 +484,15 @@ func (m *RedisModule) SetHash(redisKey, hashKey, value interface{}) error {
_, retErr := conn.Do("HSET", redisKey, hashKey, value)
if retErr != nil {
log.Error("SetHash fail,reason:%v", retErr)
log.Errorf("SetHash fail,reason:%v", retErr)
}
return retErr
}
// GetAllHashJSON ...
func (m *RedisModule) GetAllHashJSON(redisKey string) (map[string]string, error) {
if redisKey == "" {
return nil, errors.New("Key Is Empty")
return nil, errors.New("key is empty")
}
conn, err := m.getConn()
if err != nil {
@@ -503,7 +502,7 @@ func (m *RedisModule) GetAllHashJSON(redisKey string) (map[string]string, error)
value, err := conn.Do("HGETALL", redisKey)
if err != nil {
log.Error("GetAllHashJSON fail,reason:%v", err)
log.Errorf("GetAllHashJSON fail,reason:%v", err)
return nil, err
}
@@ -513,7 +512,7 @@ func (m *RedisModule) GetAllHashJSON(redisKey string) (map[string]string, error)
func (m *RedisModule) GetHash(redisKey interface{}, fieldKey interface{}) (string, error) {
if redisKey == "" || fieldKey == "" {
log.Error("GetHashValueByKey key is empty!")
return "", errors.New("Key Is Empty")
return "", errors.New("key is empty")
}
conn, err := m.getConn()
if err != nil {
@@ -523,11 +522,11 @@ func (m *RedisModule) GetHash(redisKey interface{}, fieldKey interface{}) (strin
value, err := conn.Do("HGET", redisKey, fieldKey)
if err != nil {
log.Error("GetHashValueByKey fail,reason:%v", err)
log.Errorf("GetHashValueByKey fail,reason:%v", err)
return "", err
}
if value == nil {
return "", errors.New("Reids Get Hash nil")
return "", errors.New("redis get hash nil")
}
return redis.String(value, nil)
@@ -536,7 +535,7 @@ func (m *RedisModule) GetHash(redisKey interface{}, fieldKey interface{}) (strin
func (m *RedisModule) GetMuchHash(args ...interface{}) ([]string, error) {
if len(args) < 2 {
log.Error("GetHashValueByHashKeyList key len less than two!")
return nil, errors.New("Key Is Empty")
return nil, errors.New("key is empty")
}
conn, err := m.getConn()
if err != nil {
@@ -546,15 +545,15 @@ func (m *RedisModule) GetMuchHash(args ...interface{}) ([]string, error) {
value, err := conn.Do("HMGET", args...)
if err != nil {
log.Error("GetHashValueByKey fail,reason:%v", err)
log.Errorf("GetHashValueByKey fail,reason:%v", err)
return nil, err
}
if value == nil {
return nil, errors.New("Reids Get Hash nil")
return nil, errors.New("redis get hash nil")
}
valueList := value.([]interface{})
retList := []string{}
var retList []string
for _, valueItem := range valueList {
valueByte, ok := valueItem.([]byte)
if !ok {
@@ -572,7 +571,7 @@ func (m *RedisModule) ScanMatchKeys(cursorValue int, redisKey string, count int)
nextCursorValue := 0
if redisKey == "" {
log.Error("ScanMatchKeys key is empty!")
return nextCursorValue, nil, errors.New("Key Is Empty")
return nextCursorValue, nil, errors.New("key is empty")
}
conn, err := m.getConn()
@@ -583,11 +582,11 @@ func (m *RedisModule) ScanMatchKeys(cursorValue int, redisKey string, count int)
value, err := conn.Do("SCAN", cursorValue, "match", redisKey, "count", count)
if err != nil {
log.Error("GetHashValueByKey fail,reason:%v", err)
log.Errorf("GetHashValueByKey fail,reason:%v", err)
return nextCursorValue, nil, err
}
if value == nil {
return nextCursorValue, nil, errors.New("Reids Get Hash nil")
return nextCursorValue, nil, errors.New("redis get hash nil")
}
valueList := value.([]interface{})
@@ -619,7 +618,7 @@ func (m *RedisModule) SetHashMapJSON(redisKey string, mapFieldValue map[interfac
if err == nil {
_, err = conn.Do("HSET", redisKey, symbol, temp)
if err != nil {
log.Error("SetMuchHashJSON fail,reason:%v", err)
log.Errorf("SetMuchHashJSON fail,reason:%v", err)
conn.Send("DISCARD")
return err
}
@@ -628,7 +627,7 @@ func (m *RedisModule) SetHashMapJSON(redisKey string, mapFieldValue map[interfac
// 执行命令
_, err = conn.Do("EXEC")
if err != nil {
log.Error("SetMuchHashJSON fail,reason:%v", err)
log.Errorf("SetMuchHashJSON fail,reason:%v", err)
conn.Send("DISCARD")
}
return err
@@ -643,7 +642,7 @@ func (m *RedisModule) DelHash(args ...interface{}) error {
_, retErr := conn.Do("HDEL", args...)
if retErr != nil {
log.Error("DelMuchHash fail,reason:%v", retErr)
log.Errorf("DelMuchHash fail,reason:%v", retErr)
}
return retErr
}
@@ -669,7 +668,7 @@ func (m *RedisModule) RPushListJSON(key interface{}, value ...interface{}) error
// LPUSH和RPUSH
func (m *RedisModule) setListPush(setType string, args ...interface{}) error {
if setType != "LPUSH" && setType != "RPUSH" {
return errors.New("Redis List Push Type Error,Must Be LPUSH or RPUSH")
return errors.New("redis list push type error,must be LPUSH or RPUSH")
}
conn, err := m.getConn()
if err != nil {
@@ -679,7 +678,7 @@ func (m *RedisModule) setListPush(setType string, args ...interface{}) error {
_, retErr := conn.Do(setType, args...)
if retErr != nil {
log.Error("setList fail,reason:%v", retErr)
log.Errorf("setList fail,reason:%v", retErr)
}
return retErr
}
@@ -697,7 +696,6 @@ func (m *RedisModule) setListJSONPush(setType string, key interface{}, value ...
return m.setListPush(setType, args...)
}
// Lrange ...
func (m *RedisModule) LRangeList(key string, start, end int) ([]string, error) {
conn, err := m.getConn()
if err != nil {
@@ -707,14 +705,14 @@ func (m *RedisModule) LRangeList(key string, start, end int) ([]string, error) {
reply, err := conn.Do("lrange", key, start, end)
if err != nil {
log.Error("SetListJSONRpush fail,reason:%v", err)
log.Errorf("SetListJSONRpush fail,reason:%v", err)
return nil, err
}
return redis.Strings(reply, err)
}
// 获取List的长度
// GetListLen 获取List的长度
func (m *RedisModule) GetListLen(key string) (int, error) {
conn, err := m.getConn()
if err != nil {
@@ -724,13 +722,13 @@ func (m *RedisModule) GetListLen(key string) (int, error) {
reply, err := conn.Do("LLEN", key)
if err != nil {
log.Error("GetListLen fail,reason:%v", err)
log.Errorf("GetListLen fail,reason:%v", err)
return -1, err
}
return redis.Int(reply, err)
}
// 弹出List最后条记录
// RPOPListValue 弹出List最后条记录
func (m *RedisModule) RPOPListValue(key string) (string, error) {
conn, err := m.getConn()
if err != nil {
@@ -750,7 +748,7 @@ func (m *RedisModule) LTrimList(key string, start, end int) error {
_, err = conn.Do("LTRIM", key, start, end)
if err != nil {
log.Error("LtrimListValue fail,reason:%v", err)
log.Errorf("LtrimListValue fail,reason:%v", err)
return err
}
return nil
@@ -782,7 +780,7 @@ func (m *RedisModule) LRange(key string, start, stop int) ([]byte, error) {
return makeListJson(reply.([]interface{}), false), nil
}
// 弹出list(消息队列)数据,数据放入out fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
// ListPopJson 弹出list(消息队列)数据,数据放入out fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
func (m *RedisModule) ListPopJson(key string, fromLeft, block bool, timeout int, out interface{}) error {
b, err := m.ListPop(key, fromLeft, block, timeout)
if err != nil {
@@ -795,7 +793,7 @@ func (m *RedisModule) ListPopJson(key string, fromLeft, block bool, timeout int,
return nil
}
// 弹出list(消息队列)数据 fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
// ListPop 弹出list(消息队列)数据 fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
func (m *RedisModule) ListPop(key string, fromLeft, block bool, timeout int) ([]byte, error) {
cmd := ""
if fromLeft {
@@ -837,7 +835,7 @@ func (m *RedisModule) ListPop(key string, fromLeft, block bool, timeout int) ([]
return b, nil
}
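ListPop and ListPopJson above treat a Redis list as a simple message queue: fromLeft chooses which end to pop from, block selects the blocking variant, timeout is the blocking wait (presumably seconds, matching Redis BLPOP/BRPOP), and ListPopJson additionally unmarshals the payload into out. A consumer-side sketch; the key name, Task type and surrounding worker function are illustrative.

// Pops one task from the head of the queue, waiting up to 5 seconds.
type Task struct {
	Id   int64  `json:"id"`
	Name string `json:"name"`
}

func consumeOne(m *RedisModule) (*Task, error) {
	var task Task
	if err := m.ListPopJson("queue:tasks", true, true, 5, &task); err != nil {
		return nil, err // timeout or connection error
	}
	return &task, nil
}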
// 有序集合插入Json
// ZADDInsertJson 有序集合插入Json
func (m *RedisModule) ZADDInsertJson(key string, score float64, value interface{}) error {
conn, err := m.getConn()
@@ -851,13 +849,13 @@ func (m *RedisModule) ZADDInsertJson(key string, score float64, value interface{
}
_, err = conn.Do("ZADD", key, score, JsonValue)
if err != nil {
log.Error("ZADDInsertJson fail,reason:%v", err)
log.Errorf("ZADDInsertJson fail,reason:%v", err)
return err
}
return nil
}
// 有序集合插入
// ZADDInsert 有序集合插入
func (m *RedisModule) ZADDInsert(key string, score float64, Data interface{}) error {
conn, err := m.getConn()
if err != nil {
@@ -867,7 +865,7 @@ func (m *RedisModule) ZADDInsert(key string, score float64, Data interface{}) er
_, err = conn.Do("ZADD", key, score, Data)
if err != nil {
log.Error("ZADDInsert fail,reason:%v", err)
log.Errorf("ZADDInsert fail,reason:%v", err)
return err
}
return nil
@@ -897,7 +895,7 @@ func (m *RedisModule) ZRangeJSON(key string, start, stop int, ascend bool, withS
return nil
}
// 取有序set指定排名 ascend=true表示按升序遍历 否则按降序遍历
// ZRange 取有序set指定排名 ascend=true表示按升序遍历 否则按降序遍历
func (m *RedisModule) ZRange(key string, start, stop int, ascend bool, withScores bool) ([]byte, error) {
conn, err := m.getConn()
if err != nil {
@@ -921,7 +919,7 @@ func (m *RedisModule) ZRange(key string, start, stop int, ascend bool, withScore
return makeListJson(reply.([]interface{}), withScores), nil
}
// 获取有序集合长度
// Zcard 获取有序集合长度
func (m *RedisModule) Zcard(key string) (int, error) {
conn, err := m.getConn()
if err != nil {
@@ -1005,7 +1003,7 @@ func (m *RedisModule) ZRangeByScore(key string, start, stop float64, ascend bool
return makeListJson(reply.([]interface{}), withScores), nil
}
// 获取指定member的排名
// ZScore 获取指定member的排名
func (m *RedisModule) ZScore(key string, member interface{}) (float64, error) {
conn, err := m.getConn()
if err != nil {
@@ -1021,7 +1019,7 @@ func (m *RedisModule) ZScore(key string, member interface{}) (float64, error) {
return redis.Float64(reply, err)
}
// 获取指定member的排名
// ZRank 获取指定member的排名
func (m *RedisModule) ZRank(key string, member interface{}, ascend bool) (int, error) {
conn, err := m.getConn()
if err != nil {
@@ -1080,7 +1078,7 @@ func (m *RedisModule) ZREMMulti(key string, member ...interface{}) (int, error)
func (m *RedisModule) HincrbyHashInt(redisKey, hashKey string, value int) error {
if redisKey == "" || hashKey == "" {
return errors.New("Key Is Empty")
return errors.New("key is empty")
}
conn, err := m.getConn()
if err != nil {
@@ -1090,7 +1088,7 @@ func (m *RedisModule) HincrbyHashInt(redisKey, hashKey string, value int) error
_, retErr := conn.Do("HINCRBY", redisKey, hashKey, value)
if retErr != nil {
log.Error("HincrbyHashInt fail,reason:%v", retErr)
log.Errorf("HincrbyHashInt fail,reason:%v", retErr)
}
return retErr
@@ -1105,7 +1103,7 @@ func (m *RedisModule) EXPlREInsert(key string, TTl int) error {
_, err = conn.Do("expire", key, TTl)
if err != nil {
log.Error("expire fail,reason:%v", err)
log.Errorf("expire fail,reason:%v", err)
return err
}
return nil
@@ -1131,7 +1129,7 @@ func (m *RedisModule) Keys(key string) ([]string, error) {
ret, err := conn.Do("KEYS", key)
if err != nil {
log.Error("KEYS fail, reason:%v", err)
log.Errorf("KEYS fail, reason:%v", err)
return nil, err
}
retList, ok := ret.([]interface{})
@@ -1140,7 +1138,7 @@ func (m *RedisModule) Keys(key string) ([]string, error) {
return nil, err
}
strs := []string{}
var strs []string
for _, val := range retList {
strVal, ok := val.([]byte)
if !ok {

View File

@@ -2,10 +2,10 @@ package httpservice
import (
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/util/uuid"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/uuid"
jsoniter "github.com/json-iterator/go"
"io"
"net/http"
@@ -20,7 +20,6 @@ var DefaultReadTimeout time.Duration = time.Second * 10
var DefaultWriteTimeout time.Duration = time.Second * 10
var DefaultProcessTimeout time.Duration = time.Second * 10
//http redirect
type HttpRedirectData struct {
Url string
CookieList []*http.Cookie
@@ -53,7 +52,7 @@ type IHttpRouter interface {
POST(url string, handle HttpHandle) bool
Router(session *HttpSession)
SetServeFile(method HTTP_METHOD, urlpath string, dirname string) error
SetServeFile(method HTTP_METHOD, urlPath string, dirname string) error
SetFormFileKey(formFileKey string)
GetFormFileKey() string
AddHttpFiltrate(FiltrateFun HttpFiltrate) bool
@@ -93,7 +92,7 @@ type HttpService struct {
listenAddr string
corsHeader *CORSHeader
processTimeout time.Duration
manualStart bool
manualStart bool
}
type HttpFiltrate func(session *HttpSession) bool //true is pass
@@ -118,7 +117,7 @@ func NewHttpHttpRouter() IHttpRouter {
return httpRouter
}
func (slf *HttpSession) GetRawQuery() string{
func (slf *HttpSession) GetRawQuery() string {
return slf.r.URL.RawQuery
}
@@ -215,7 +214,7 @@ func (slf *HttpRouter) analysisRouterUrl(url string) (string, error) {
//替换所有空格
url = strings.ReplaceAll(url, " ", "")
if len(url) <= 1 || url[0] != '/' {
return "", fmt.Errorf("url %s format is error!", url)
return "", fmt.Errorf("url %s format is error", url)
}
//去掉尾部的/
@@ -300,12 +299,12 @@ func (httpService *HttpService) SetHttpRouter(httpRouter IHttpRouter, eventHandl
httpService.RegEventReceiverFunc(event.Sys_Event_Http_Event, eventHandler, httpService.HttpEventHandler)
}
func (slf *HttpRouter) SetServeFile(method HTTP_METHOD, urlpath string, dirname string) error {
func (slf *HttpRouter) SetServeFile(method HTTP_METHOD, urlPath string, dirname string) error {
_, err := os.Stat(dirname)
if err != nil {
return err
}
matchURL, aErr := slf.analysisRouterUrl(urlpath)
matchURL, aErr := slf.analysisRouterUrl(urlPath)
if aErr != nil {
return aErr
}
@@ -350,15 +349,15 @@ func (slf *HttpSession) redirects() {
func (httpService *HttpService) OnInit() error {
iConfig := httpService.GetServiceCfg()
if iConfig == nil {
return fmt.Errorf("%s service config is error!", httpService.GetName())
return fmt.Errorf("%s service config is error", httpService.GetName())
}
httpCfg := iConfig.(map[string]interface{})
addr, ok := httpCfg["ListenAddr"]
if ok == false {
return fmt.Errorf("%s service config is error!", httpService.GetName())
return fmt.Errorf("%s service config is error", httpService.GetName())
}
var readTimeout time.Duration = DefaultReadTimeout
var writeTimeout time.Duration = DefaultWriteTimeout
var readTimeout = DefaultReadTimeout
var writeTimeout = DefaultWriteTimeout
if cfgRead, ok := httpCfg["ReadTimeout"]; ok == true {
readTimeout = time.Duration(cfgRead.(float64)) * time.Millisecond
@@ -370,8 +369,8 @@ func (httpService *HttpService) OnInit() error {
if manualStart, ok := httpCfg["ManualStart"]; ok == true {
httpService.manualStart = manualStart.(bool)
}else{
manualStart =false
} else {
manualStart = false
}
httpService.processTimeout = DefaultProcessTimeout

View File

@@ -3,10 +3,10 @@ package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/cluster"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/util/coroutine"
"github.com/duanhf2012/origin/v2/cluster"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/coroutine"
"strings"
"sync/atomic"
"time"
@@ -16,7 +16,7 @@ type CustomerSubscriber struct {
rpc.IRpcHandler
topic string
subscriber *Subscriber
fromNodeId int
fromNodeId string
callBackRpcMethod string
serviceName string
StartIndex uint64
@@ -24,7 +24,7 @@ type CustomerSubscriber struct {
subscribeMethod SubscribeMethod
customerId string
isStop int32 //退出标记
isStop int32 //退出标记
topicCache []TopicData // 从消息队列中取出来的消息的缓存
}
@@ -37,7 +37,7 @@ const (
MethodLast SubscribeMethod = 1 //Last模式以该消费者上次记录的位置开始订阅
)
func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId int, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId string, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
cs.subscriber = ss
cs.fromNodeId = fromNodeId
cs.callBackRpcMethod = callBackRpcMethod
@@ -62,13 +62,13 @@ func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandle
cs.serviceName = strRpcMethod[0]
if cluster.HasService(fromNodeId, cs.serviceName) == false {
err := fmt.Errorf("nodeId %d cannot found %s", fromNodeId, cs.serviceName)
err := fmt.Errorf("nodeId %s cannot found %s", fromNodeId, cs.serviceName)
log.SError(err.Error())
return err
}
if cluster.GetCluster().IsNodeConnected(fromNodeId) == false {
err := fmt.Errorf("nodeId %d is disconnect", fromNodeId)
err := fmt.Errorf("nodeId %s is disconnect", fromNodeId)
log.SError(err.Error())
return err
}
@@ -84,8 +84,8 @@ func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandle
return nil
}
// 开始订阅
func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId int, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
// Subscribe 开始订阅
func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId string, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
err := cs.trySetSubscriberBaseInfo(rpcHandler, ss, topic, subscribeMethod, customerId, fromNodeId, callBackRpcMethod, startIndex, oneBatchQuantity)
if err != nil {
return err
@@ -96,7 +96,7 @@ func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscrib
return nil
}
// 取消订阅
// UnSubscribe 取消订阅
func (cs *CustomerSubscriber) UnSubscribe() {
atomic.StoreInt32(&cs.isStop, 1)
}
@@ -163,14 +163,14 @@ func (cs *CustomerSubscriber) subscribe() bool {
cs.publishToCustomer(topicData)
return true
}
//从持久化数据中来找
topicData = cs.subscriber.dataPersist.FindTopicData(cs.topic, cs.StartIndex, int64(cs.oneBatchQuantity),cs.topicCache[:0])
topicData = cs.subscriber.dataPersist.FindTopicData(cs.topic, cs.StartIndex, int64(cs.oneBatchQuantity), cs.topicCache[:0])
return cs.publishToCustomer(topicData)
}
func (cs *CustomerSubscriber) checkCustomerIsValid() bool {
//1.检查nodeid是否在线不在线直接取消订阅
//1.检查nodeId是否在线不在线直接取消订阅
if cluster.GetCluster().IsNodeConnected(cs.fromNodeId) == false {
return false
}
@@ -213,7 +213,7 @@ func (cs *CustomerSubscriber) publishToCustomer(topicData []TopicData) bool {
}
//推送数据
err := cs.CallNodeWithTimeout(4*time.Minute,cs.fromNodeId, cs.callBackRpcMethod, &dbQueuePublishReq, &dbQueuePushRes)
err := cs.CallNodeWithTimeout(4*time.Minute, cs.fromNodeId, cs.callBackRpcMethod, &dbQueuePublishReq, &dbQueuePushRes)
if err != nil {
time.Sleep(time.Second * 1)
continue

View File

@@ -1,7 +1,7 @@
package messagequeueservice
import (
"github.com/duanhf2012/origin/util/algorithms"
"github.com/duanhf2012/origin/v2/util/algorithms"
"sync"
)
@@ -19,7 +19,7 @@ func (mq *MemoryQueue) Init(cap int32) {
mq.topicQueue = make([]TopicData, cap+1)
}
// 从队尾Push数据
// Push 从队尾Push数据
func (mq *MemoryQueue) Push(topicData *TopicData) bool {
mq.locker.Lock()
defer mq.locker.Unlock()
@@ -51,7 +51,7 @@ func (mq *MemoryQueue) findData(startPos int32, startIndex uint64, limit int32)
} else {
findEndPos = int32(len(mq.topicQueue))
}
if findStartPos >= findEndPos {
return nil, false
}
@@ -87,21 +87,21 @@ func (mq *MemoryQueue) FindData(startIndex uint64, limit int32, dataQueue []Topi
return nil, false
} else if mq.head < mq.tail {
// 队列没有折叠
datas,ret := mq.findData(mq.head + 1, startIndex, limit)
datas, ret := mq.findData(mq.head+1, startIndex, limit)
if ret {
dataQueue = append(dataQueue, datas...)
}
return dataQueue, ret
} else {
// 折叠先找后面的部分
datas,ret := mq.findData(mq.head+1, startIndex, limit)
datas, ret := mq.findData(mq.head+1, startIndex, limit)
if ret {
dataQueue = append(dataQueue, datas...)
return dataQueue, ret
}
// 后面没找到,从前面开始找
datas,ret = mq.findData(0, startIndex, limit)
datas, ret = mq.findData(0, startIndex, limit)
dataQueue = append(dataQueue, datas...)
return dataQueue, ret
}

View File

@@ -3,9 +3,9 @@ package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/rpc"
"sync"
)
@@ -122,5 +122,5 @@ func (ms *MessageQueueService) RPC_Publish(inParam *rpc.DBQueuePublishReq, outPa
func (ms *MessageQueueService) RPC_Subscribe(req *rpc.DBQueueSubscribeReq, res *rpc.DBQueueSubscribeRes) error {
topicRoom := ms.GetTopicRoom(req.TopicName)
return topicRoom.TopicSubscribe(ms.GetRpcHandler(), req.SubType, int32(req.Method), int(req.FromNodeId), req.RpcMethod, req.TopicName, req.CustomerId, req.StartIndex, req.OneBatchQuantity)
return topicRoom.TopicSubscribe(ms.GetRpcHandler(), req.SubType, int32(req.Method), req.FromNodeId, req.RpcMethod, req.TopicName, req.CustomerId, req.StartIndex, req.OneBatchQuantity)
}

View File

@@ -3,9 +3,9 @@ package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/sysmodule/mongodbmodule"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"time"
@@ -77,7 +77,7 @@ func (mp *MongoPersist) OnInit() error {
keys = append(keys, "Customer", "Topic")
IndexKey = append(IndexKey, keys)
s := mp.mongo.TakeSession()
if err := s.EnsureUniqueIndex(mp.dbName, CustomerCollectName, IndexKey, true, true,true); err != nil {
if err := s.EnsureUniqueIndex(mp.dbName, CustomerCollectName, IndexKey, true, true, true); err != nil {
log.SError("EnsureUniqueIndex is fail ", err.Error())
return err
}
@@ -162,7 +162,7 @@ func (mp *MongoPersist) persistTopicData(collectionName string, topicData []Topi
_, err := s.Collection(mp.dbName, collectionName).InsertMany(ctx, documents)
if err != nil {
log.SError("PersistTopicData InsertMany fail,collect name is ", collectionName," error:",err.Error())
log.SError("PersistTopicData InsertMany fail,collect name is ", collectionName, " error:", err.Error())
//失败最大重试数量
return retryCount >= mp.retryCount
@@ -171,16 +171,16 @@ func (mp *MongoPersist) persistTopicData(collectionName string, topicData []Topi
return true
}
func (mp *MongoPersist) IsSameDay(timestamp1 int64,timestamp2 int64) bool{
func (mp *MongoPersist) IsSameDay(timestamp1 int64, timestamp2 int64) bool {
t1 := time.Unix(timestamp1, 0)
t2 := time.Unix(timestamp2, 0)
return t1.Year() == t2.Year() && t1.Month() == t2.Month()&&t1.Day() == t2.Day()
return t1.Year() == t2.Year() && t1.Month() == t2.Month() && t1.Day() == t2.Day()
}
// PersistTopicData 持久化数据
func (mp *MongoPersist) PersistTopicData(topic string, topicData []TopicData, retryCount int) ([]TopicData, []TopicData, bool) {
if len(topicData) == 0 {
return nil, nil,true
return nil, nil, true
}
preDate := topicData[0].Seq >> 32
@@ -188,7 +188,7 @@ func (mp *MongoPersist) PersistTopicData(topic string, topicData []TopicData, re
for findPos = 1; findPos < len(topicData); findPos++ {
newDate := topicData[findPos].Seq >> 32
//说明换天了
if mp.IsSameDay(int64(preDate),int64(newDate)) == false {
if mp.IsSameDay(int64(preDate), int64(newDate)) == false {
break
}
}
@@ -201,14 +201,13 @@ func (mp *MongoPersist) PersistTopicData(topic string, topicData []TopicData, re
}
//如果成功
return topicData[findPos:len(topicData)], topicData[0:findPos], true
return topicData[findPos:], topicData[0:findPos], true
}
// FindTopicData 查找数据
func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int64,topicBuff []TopicData) ([]TopicData, bool) {
func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int64, topicBuff []TopicData) ([]TopicData, bool) {
s := mp.mongo.TakeSession()
condition := bson.D{{Key: "_id", Value: bson.D{{Key: "$gt", Value: startIndex}}}}
var findOption options.FindOptions
@@ -238,11 +237,7 @@ func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int
defer cancelAll()
err = cursor.All(ctxAll, &res)
if err != nil {
if err != nil {
log.SError("find collect name ", topic, " is error:", err.Error())
return nil, false
}
log.Error("find collect name ", topic, " is error", log.ErrorAttr("err", err))
return nil, false
}
@@ -251,7 +246,7 @@ func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int
rawData, errM := bson.Marshal(res[i])
if errM != nil {
if errM != nil {
log.SError("collect name ", topic, " Marshal is error:", err.Error())
log.Error("collect name ", topic, " Marshal is error", log.ErrorAttr("err", err))
return nil, false
}
continue
@@ -262,26 +257,26 @@ func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int
return topicBuff, true
}
func (mp *MongoPersist) IsYesterday(startIndex uint64) (bool,string){
timeStamp := int64(startIndex>>32)
func (mp *MongoPersist) IsYesterday(startIndex uint64) (bool, string) {
timeStamp := int64(startIndex >> 32)
startTime := time.Unix(timeStamp, 0).AddDate(0,0,1)
startTime := time.Unix(timeStamp, 0).AddDate(0, 0, 1)
nowTm := time.Now()
return startTime.Year() == nowTm.Year() && startTime.Month() == nowTm.Month()&&startTime.Day() == nowTm.Day(),nowTm.Format("20060102")
return startTime.Year() == nowTm.Year() && startTime.Month() == nowTm.Month() && startTime.Day() == nowTm.Day(), nowTm.Format("20060102")
}
func (mp *MongoPersist) getCollectCount(topic string,today string) (int64 ,error){
func (mp *MongoPersist) getCollectCount(topic string, today string) (int64, error) {
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
collectName := fmt.Sprintf("%s_%s", topic, today)
count, err := s.Collection(mp.dbName, collectName).EstimatedDocumentCount(ctx)
return count,err
return count, err
}
// FindTopicData 查找数据
func (mp *MongoPersist) FindTopicData(topic string, startIndex uint64, limit int64,topicBuff []TopicData) []TopicData {
func (mp *MongoPersist) FindTopicData(topic string, startIndex uint64, limit int64, topicBuff []TopicData) []TopicData {
//某表找不到,一直往前找,找到当前置为止
for days := 1; days <= MaxDays; days++ {
//是否可以跳天
@@ -290,12 +285,12 @@ func (mp *MongoPersist) FindTopicData(topic string, startIndex uint64, limit int
IsJumpDays := true
//如果是昨天,先判断当天有没有表数据
bYesterday,strToday := mp.IsYesterday(startIndex)
if bYesterday {
count,err := mp.getCollectCount(topic,strToday)
bYesterday, strToday := mp.IsYesterday(startIndex)
if bYesterday {
count, err := mp.getCollectCount(topic, strToday)
if err != nil {
//失败时,重新开始
log.SError("getCollectCount ",topic,"_",strToday," is fail:",err.Error())
log.SError("getCollectCount ", topic, "_", strToday, " is fail:", err.Error())
return nil
}
//当天没有记录,则不能跳表,有可能当天还有数据
@@ -305,9 +300,9 @@ func (mp *MongoPersist) FindTopicData(topic string, startIndex uint64, limit int
}
//从startIndex开始一直往后查
topicData, isSucc := mp.findTopicData(topic, startIndex, limit,topicBuff)
topicData, ok := mp.findTopicData(topic, startIndex, limit, topicBuff)
//有数据或者数据库出错时返回,返回后,会进行下一轮的查询遍历
if len(topicData) > 0 || isSucc == false {
if len(topicData) > 0 || ok == false {
return topicData
}
@@ -411,14 +406,14 @@ func (mp *MongoPersist) PersistIndex(topic string, customerId string, index uint
condition := bson.D{{Key: "Customer", Value: customerId}, {Key: "Topic", Value: topic}}
upsert := bson.M{"Customer": customerId, "Topic": topic, "Index": index}
updata := bson.M{"$set": upsert}
update := bson.M{"$set": upsert}
var UpdateOptionsOpts []*options.UpdateOptions
UpdateOptionsOpts = append(UpdateOptionsOpts, options.Update().SetUpsert(true))
ctx, cancel := s.GetDefaultContext()
defer cancel()
_, err := s.Collection(mp.dbName, CustomerCollectName).UpdateOne(ctx, condition, updata, UpdateOptionsOpts...)
_, err := s.Collection(mp.dbName, CustomerCollectName).UpdateOne(ctx, condition, update, UpdateOptionsOpts...)
if err != nil {
log.SError("PersistIndex fail :", err.Error())
}

View File

@@ -1,13 +1,13 @@
package messagequeueservice
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"sync"
)
// 订阅器
// Subscriber 订阅器
type Subscriber struct {
customerLocker sync.RWMutex
mapCustomer map[string]*CustomerSubscriber
@@ -31,7 +31,7 @@ func (ss *Subscriber) PersistTopicData(topic string, topics []TopicData, retryCo
return ss.dataPersist.PersistTopicData(topic, topics, retryCount)
}
func (ss *Subscriber) TopicSubscribe(rpcHandler rpc.IRpcHandler, subScribeType rpc.SubscribeType, subscribeMethod SubscribeMethod, fromNodeId int, callBackRpcMethod string, topic string, customerId string, StartIndex uint64, oneBatchQuantity int32) error {
func (ss *Subscriber) TopicSubscribe(rpcHandler rpc.IRpcHandler, subScribeType rpc.SubscribeType, subscribeMethod SubscribeMethod, fromNodeId string, callBackRpcMethod string, topic string, customerId string, StartIndex uint64, oneBatchQuantity int32) error {
//取消订阅时
if subScribeType == rpc.SubscribeType_Unsubscribe {
ss.UnSubscribe(customerId)

View File

@@ -2,8 +2,8 @@ package messagequeueservice
import (
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/util/coroutine"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/coroutine"
"sync"
"sync/atomic"
"time"
@@ -43,7 +43,7 @@ type TopicRoom struct {
isStop int32
}
// maxProcessTopicBacklogNum:主题最大积压数量
// Init maxProcessTopicBacklogNum:主题最大积压数量
func (tr *TopicRoom) Init(maxTopicBacklogNum int32, memoryQueueLen int32, topic string, queueWait *sync.WaitGroup, dataPersist QueueDataPersist) {
if maxTopicBacklogNum == 0 {
maxTopicBacklogNum = DefaultMaxTopicBacklogNum
@@ -116,18 +116,18 @@ func (tr *TopicRoom) topicRoomRun() {
for retryCount := 0; retryCount < maxTryPersistNum; {
//持久化处理
stagingBuff, savedBuff, ret := tr.PersistTopicData(tr.topic, stagingBuff, retryCount+1)
if ret == true {
// 1. 把成功存储的数据放入内存中
if len(savedBuff) > 0 {
tr.PushTopicDataToQueue(tr.topic, savedBuff)
}
// 2. 如果存档成功,并且有后续批次,则继续存档
if ret == true && len(stagingBuff) > 0 {
if len(stagingBuff) > 0 {
continue
}
// 3. 成功了,跳出
break
} else {

View File

@@ -2,10 +2,10 @@ package rankservice
import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/sysmodule/mongodbmodule"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"runtime"
@@ -14,37 +14,37 @@ import (
"time"
)
const batchRemoveNum = 128 //一切删除的最大数量
const batchRemoveNum = 128 //一切删除的最大数量
// RankDataDB 排行表数据
type RankDataDB struct {
Id uint64 `bson:"_id"`
RefreshTime int64 `bson:"RefreshTime"`
SortData []int64 `bson:"SortData"`
Data []byte `bson:"Data"`
ExData []int64 `bson:"ExData"`
Id uint64 `bson:"_id"`
RefreshTime int64 `bson:"RefreshTime"`
SortData []int64 `bson:"SortData"`
Data []byte `bson:"Data"`
ExData []int64 `bson:"ExData"`
}
// MongoPersist持久化Module
// MongoPersist 持久化Module
type MongoPersist struct {
service.Module
mongo mongodbmodule.MongoModule
url string //Mongodb连接url
dbName string //数据库名称
SaveInterval time.Duration //落地数据库时间间隔
url string //Mongodb连接url
dbName string //数据库名称
SaveInterval time.Duration //落地数据库时间间隔
sync.Mutex
mapRemoveRankData map[uint64]map[uint64]struct{} //将要删除的排行数据 map[RankId]map[Key]struct{}
mapUpsertRankData map[uint64]map[uint64]RankData //需要upsert的排行数据 map[RankId][key]RankData
mapRankSkip map[uint64]IRankSkip //所有的排行榜对象map[RankId]IRankSkip
maxRetrySaveCount int //存档重试次数
retryTimeIntervalMs time.Duration //重试时间间隔
mapRankSkip map[uint64]IRankSkip //所有的排行榜对象map[RankId]IRankSkip
maxRetrySaveCount int //存档重试次数
retryTimeIntervalMs time.Duration //重试时间间隔
lastSaveTime time.Time //最后一次存档时间
lastSaveTime time.Time //最后一次存档时间
stop int32 //是否停服
stop int32 //是否停服
waitGroup sync.WaitGroup //等待停服
}
@@ -84,12 +84,12 @@ func (mp *MongoPersist) ReadCfg() error {
}
//读取数据库配置
saveMongoCfg,ok := mapDBServiceCfg["SaveMongo"]
saveMongoCfg, ok := mapDBServiceCfg["SaveMongo"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo config is error")
}
mongodbCfg,ok := saveMongoCfg.(map[string]interface{})
mongodbCfg, ok := saveMongoCfg.(map[string]interface{})
if ok == false {
return fmt.Errorf("RankService.SaveMongo config is error")
}
@@ -111,7 +111,7 @@ func (mp *MongoPersist) ReadCfg() error {
return fmt.Errorf("RankService.SaveMongo.SaveIntervalMs config is error")
}
mp.SaveInterval = time.Duration(saveInterval.(float64))*time.Millisecond
mp.SaveInterval = time.Duration(saveInterval.(float64)) * time.Millisecond
maxRetrySaveCount, ok := mongodbCfg["MaxRetrySaveCount"]
if ok == false {
@@ -123,16 +123,16 @@ func (mp *MongoPersist) ReadCfg() error {
if ok == false {
return fmt.Errorf("RankService.SaveMongo.RetryTimeIntervalMs config is error")
}
mp.retryTimeIntervalMs = time.Duration(retryTimeIntervalMs.(float64))*time.Millisecond
mp.retryTimeIntervalMs = time.Duration(retryTimeIntervalMs.(float64)) * time.Millisecond
return nil
}
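The ReadCfg edits above are formatting-only, but the underlying pattern of pulling typed values out of a map[string]interface{} (decoded numbers arrive as float64 and must be converted before building a time.Duration) is easy to get wrong. A small sketch of the same idea, assuming the config has already been decoded into a generic map; the key names mirror the ones visible in the hunk.

package main

import (
	"fmt"
	"time"
)

func readSaveMongoCfg(mongodbCfg map[string]interface{}) (time.Duration, time.Duration, error) {
	saveInterval, ok := mongodbCfg["SaveIntervalMs"]
	if !ok {
		return 0, 0, fmt.Errorf("RankService.SaveMongo.SaveIntervalMs config is error")
	}
	retryInterval, ok := mongodbCfg["RetryTimeIntervalMs"]
	if !ok {
		return 0, 0, fmt.Errorf("RankService.SaveMongo.RetryTimeIntervalMs config is error")
	}

	// JSON/YAML numbers decode to float64, so convert before scaling to a Duration.
	return time.Duration(saveInterval.(float64)) * time.Millisecond,
		time.Duration(retryInterval.(float64)) * time.Millisecond, nil
}

func main() {
	cfg := map[string]interface{}{"SaveIntervalMs": 5000.0, "RetryTimeIntervalMs": 200.0}
	save, retry, err := readSaveMongoCfg(cfg)
	fmt.Println(save, retry, err)
}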
//启服从数据库加载
// OnStart 启服从数据库加载
func (mp *MongoPersist) OnStart() {
}
func (mp *MongoPersist) OnSetupRank(manual bool,rankSkip *RankSkip) error{
func (mp *MongoPersist) OnSetupRank(manual bool, rankSkip *RankSkip) error {
if mp.mapRankSkip == nil {
mp.mapRankSkip = map[uint64]IRankSkip{}
}
@@ -142,17 +142,17 @@ func (mp *MongoPersist) OnSetupRank(manual bool,rankSkip *RankSkip) error{
return nil
}
log.Info("start load rank ",rankSkip.GetRankName()," from mongodb.")
err := mp.loadFromDB(rankSkip.GetRankID(),rankSkip.GetRankName())
if err != nil {
log.SError("load from db is fail :%s",err.Error())
log.Info("start load rank ", rankSkip.GetRankName(), " from mongodb.")
err := mp.loadFromDB(rankSkip.GetRankID(), rankSkip.GetRankName())
if err != nil {
log.SError("load from db is fail :%s", err.Error())
return err
}
log.Info("finish load rank ",rankSkip.GetRankName()," from mongodb.")
log.Info("finish load rank ", rankSkip.GetRankName(), " from mongodb.")
return nil
}
func (mp *MongoPersist) loadFromDB(rankId uint64,rankCollectName string) error{
func (mp *MongoPersist) loadFromDB(rankId uint64, rankCollectName string) error {
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
@@ -164,14 +164,14 @@ func (mp *MongoPersist) loadFromDB(rankId uint64,rankCollectName string) error{
return err
}
if cursor.Err()!=nil {
if cursor.Err() != nil {
log.SError("find collect name ", rankCollectName, " is error:", cursor.Err().Error())
return err
}
rankSkip := mp.mapRankSkip[rankId]
if rankSkip == nil {
err = fmt.Errorf("rank ", rankCollectName, " is not setup:")
err = fmt.Errorf("rank %s is not setup", rankCollectName)
log.SError(err.Error())
return err
}
@@ -189,69 +189,68 @@ func (mp *MongoPersist) loadFromDB(rankId uint64,rankCollectName string) error{
rankData.Data = rankDataDB.Data
rankData.Key = rankDataDB.Id
rankData.SortData = rankDataDB.SortData
for _,eData := range rankDataDB.ExData{
rankData.ExData = append(rankData.ExData,&rpc.ExtendIncData{InitValue:eData})
for _, eData := range rankDataDB.ExData {
rankData.ExData = append(rankData.ExData, &rpc.ExtendIncData{InitValue: eData})
}
//更新到排行榜
rankSkip.UpsetRank(&rankData,rankDataDB.RefreshTime,true)
rankSkip.UpsetRank(&rankData, rankDataDB.RefreshTime, true)
}
return nil
}
func (mp *MongoPersist) lazyInitRemoveMap(rankId uint64){
func (mp *MongoPersist) lazyInitRemoveMap(rankId uint64) {
if mp.mapRemoveRankData[rankId] == nil {
mp.mapRemoveRankData[rankId] = make(map[uint64]struct{},256)
mp.mapRemoveRankData[rankId] = make(map[uint64]struct{}, 256)
}
}
func (mp *MongoPersist) lazyInitUpsertMap(rankId uint64){
func (mp *MongoPersist) lazyInitUpsertMap(rankId uint64) {
if mp.mapUpsertRankData[rankId] == nil {
mp.mapUpsertRankData[rankId] = make(map[uint64]RankData,256)
mp.mapUpsertRankData[rankId] = make(map[uint64]RankData, 256)
}
}
func (mp *MongoPersist) OnEnterRank(rankSkip IRankSkip, enterData *RankData){
func (mp *MongoPersist) OnEnterRank(rankSkip IRankSkip, enterData *RankData) {
mp.Lock()
defer mp.Unlock()
delete(mp.mapRemoveRankData,enterData.Key)
delete(mp.mapRemoveRankData, enterData.Key)
mp.lazyInitUpsertMap(rankSkip.GetRankID())
mp.mapUpsertRankData[rankSkip.GetRankID()][enterData.Key] = *enterData
}
func (mp *MongoPersist) OnLeaveRank(rankSkip IRankSkip, leaveData *RankData){
func (mp *MongoPersist) OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) {
mp.Lock()
defer mp.Unlock()
//先删掉更新中的数据
delete(mp.mapUpsertRankData,leaveData.Key)
delete(mp.mapUpsertRankData, leaveData.Key)
mp.lazyInitRemoveMap(rankSkip.GetRankID())
mp.mapRemoveRankData[rankSkip.GetRankID()][leaveData.Key] = struct{}{}
}
func (mp *MongoPersist) OnChangeRankData(rankSkip IRankSkip, changeData *RankData){
func (mp *MongoPersist) OnChangeRankData(rankSkip IRankSkip, changeData *RankData) {
mp.Lock()
defer mp.Unlock()
//先删掉要删除的数据
delete(mp.mapRemoveRankData,changeData.Key)
delete(mp.mapRemoveRankData, changeData.Key)
//更新数据
mp.lazyInitUpsertMap(rankSkip.GetRankID())
mp.mapUpsertRankData[rankSkip.GetRankID()][changeData.Key] = *changeData
}
//停存持久化到DB
func (mp *MongoPersist) OnStop(mapRankSkip map[uint64]*RankSkip){
atomic.StoreInt32(&mp.stop,1)
// OnStop 停存持久化到DB
func (mp *MongoPersist) OnStop(mapRankSkip map[uint64]*RankSkip) {
atomic.StoreInt32(&mp.stop, 1)
mp.waitGroup.Wait()
}
func (mp *MongoPersist) JugeTimeoutSave() bool{
func (mp *MongoPersist) JudgeTimeoutSave() bool {
timeout := time.Now()
isTimeOut := timeout.Sub(mp.lastSaveTime) >= mp.SaveInterval
if isTimeOut == true {
@@ -261,18 +260,18 @@ func (mp *MongoPersist) JugeTimeoutSave() bool{
return isTimeOut
}
func (mp *MongoPersist) persistCoroutine(){
func (mp *MongoPersist) persistCoroutine() {
defer mp.waitGroup.Done()
for atomic.LoadInt32(&mp.stop)==0 || mp.hasPersistData(){
for atomic.LoadInt32(&mp.stop) == 0 {
//间隔时间sleep
time.Sleep(time.Second*1)
time.Sleep(time.Second * 1)
//没有持久化数据continue
if mp.hasPersistData() == false {
continue
}
if mp.JugeTimeoutSave() == false{
if mp.JudgeTimeoutSave() == false {
continue
}
@@ -284,20 +283,20 @@ func (mp *MongoPersist) persistCoroutine(){
mp.saveToDB()
}
func (mp *MongoPersist) hasPersistData() bool{
func (mp *MongoPersist) hasPersistData() bool {
mp.Lock()
defer mp.Unlock()
return len(mp.mapUpsertRankData)>0 || len(mp.mapRemoveRankData) >0
return len(mp.mapUpsertRankData) > 0 || len(mp.mapRemoveRankData) > 0
}
func (mp *MongoPersist) saveToDB(){
func (mp *MongoPersist) saveToDB() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.Dump(string(buf[:l]), log.String("error", errString))
}
}()
@@ -314,13 +313,13 @@ func (mp *MongoPersist) saveToDB(){
mp.upsertRankDataToDB(mapUpsertRankData)
}
for len(mapRemoveRankData) >0 {
for len(mapRemoveRankData) > 0 {
mp.removeRankDataToDB(mapRemoveRankData)
}
}
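persistCoroutine above loops until the stop flag is set, sleeps one second per pass, and only flushes when there is pending data and the save interval has elapsed; OnStop sets the flag atomically and waits on a WaitGroup, and a final flush runs after the loop exits. A reduced sketch of that shutdown-aware loop, with the flush and pending-data checks as stand-ins:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type persister struct {
	stop         int32
	waitGroup    sync.WaitGroup
	lastSaveTime time.Time
	saveInterval time.Duration
}

func (p *persister) hasPersistData() bool { return true } // stand-in

func (p *persister) saveToDB() { fmt.Println("flush at", time.Now().Format(time.RFC3339)) }

func (p *persister) judgeTimeoutSave() bool {
	if time.Since(p.lastSaveTime) >= p.saveInterval {
		p.lastSaveTime = time.Now()
		return true
	}
	return false
}

func (p *persister) persistCoroutine() {
	defer p.waitGroup.Done()
	for atomic.LoadInt32(&p.stop) == 0 {
		time.Sleep(time.Second)
		if !p.hasPersistData() || !p.judgeTimeoutSave() {
			continue
		}
		p.saveToDB()
	}
	p.saveToDB() // final flush once the stop flag is seen
}

func (p *persister) onStop() {
	atomic.StoreInt32(&p.stop, 1)
	p.waitGroup.Wait()
}

func main() {
	p := &persister{saveInterval: 2 * time.Second, lastSaveTime: time.Now()}
	p.waitGroup.Add(1)
	go p.persistCoroutine()
	time.Sleep(3 * time.Second)
	p.onStop()
}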
func (mp *MongoPersist) removeToDB(collectName string,keys []uint64) error{
func (mp *MongoPersist) removeToDB(collectName string, keys []uint64) error {
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
@@ -336,16 +335,16 @@ func (mp *MongoPersist) removeToDB(collectName string,keys []uint64) error{
return nil
}
func (mp *MongoPersist) removeRankData(rankId uint64,keys []uint64) bool {
func (mp *MongoPersist) removeRankData(rankId uint64, keys []uint64) bool {
rank := mp.mapRankSkip[rankId]
if rank== nil {
log.SError("cannot find rankId ",rankId,"config")
if rank == nil {
log.SError("cannot find rankId ", rankId, "config")
return false
}
//不成功则重试maxRetrySaveCount次
for i:=0;i<mp.maxRetrySaveCount;i++{
if mp.removeToDB(rank.GetRankName(),keys)!= nil {
for i := 0; i < mp.maxRetrySaveCount; i++ {
if mp.removeToDB(rank.GetRankName(), keys) != nil {
time.Sleep(mp.retryTimeIntervalMs)
continue
}
@@ -355,9 +354,9 @@ func (mp *MongoPersist) removeRankData(rankId uint64,keys []uint64) bool {
return true
}
func (mp *MongoPersist) upsertToDB(collectName string,rankData *RankData) error{
func (mp *MongoPersist) upsertToDB(collectName string, rankData *RankData) error {
condition := bson.D{{"_id", rankData.Key}}
upsert := bson.M{"_id":rankData.Key,"RefreshTime": rankData.refreshTimestamp, "SortData": rankData.SortData, "Data": rankData.Data,"ExData":rankData.ExData}
upsert := bson.M{"_id": rankData.Key, "RefreshTime": rankData.RefreshTimestamp, "SortData": rankData.SortData, "Data": rankData.Data, "ExData": rankData.ExData}
update := bson.M{"$set": upsert}
s := mp.mongo.TakeSession()
@@ -365,7 +364,7 @@ func (mp *MongoPersist) upsertToDB(collectName string,rankData *RankData) error{
defer cancel()
updateOpts := options.Update().SetUpsert(true)
_, err := s.Collection(mp.dbName, collectName).UpdateOne(ctx, condition,update,updateOpts)
_, err := s.Collection(mp.dbName, collectName).UpdateOne(ctx, condition, update, updateOpts)
if err != nil {
log.SError("MongoPersist upsertDB fail,collect name is ", collectName)
return err
@@ -374,19 +373,19 @@ func (mp *MongoPersist) upsertToDB(collectName string,rankData *RankData) error{
return nil
}
func (mp *MongoPersist) upsertRankDataToDB(mapUpsertRankData map[uint64]map[uint64]RankData) error{
for rankId,mapRankData := range mapUpsertRankData{
rank,ok := mp.mapRankSkip[rankId]
func (mp *MongoPersist) upsertRankDataToDB(mapUpsertRankData map[uint64]map[uint64]RankData) error {
for rankId, mapRankData := range mapUpsertRankData {
rank, ok := mp.mapRankSkip[rankId]
if ok == false {
log.SError("cannot find rankId ",rankId,",config is error")
delete(mapUpsertRankData,rankId)
log.SError("cannot find rankId ", rankId, ",config is error")
delete(mapUpsertRankData, rankId)
continue
}
for key,rankData := range mapRankData{
for key, rankData := range mapRankData {
//最大重试mp.maxRetrySaveCount次
for i:=0;i<mp.maxRetrySaveCount;i++{
err := mp.upsertToDB(rank.GetRankName(),&rankData)
for i := 0; i < mp.maxRetrySaveCount; i++ {
err := mp.upsertToDB(rank.GetRankName(), &rankData)
if err != nil {
time.Sleep(mp.retryTimeIntervalMs)
continue
@@ -395,11 +394,11 @@ func (mp *MongoPersist) upsertRankDataToDB(mapUpsertRankData map[uint64]map[uint
}
//存完删掉指定key
delete(mapRankData,key)
delete(mapRankData, key)
}
if len(mapRankData) == 0 {
delete(mapUpsertRankData,rankId)
delete(mapUpsertRankData, rankId)
}
}
@@ -407,23 +406,22 @@ func (mp *MongoPersist) upsertRankDataToDB(mapUpsertRankData map[uint64]map[uint
}
func (mp *MongoPersist) removeRankDataToDB(mapRemoveRankData map[uint64]map[uint64]struct{}) {
for rankId ,mapRemoveKey := range mapRemoveRankData{
for rankId, mapRemoveKey := range mapRemoveRankData {
//每100个一删
keyList := make([]uint64,0,batchRemoveNum)
for key := range mapRemoveKey {
delete(mapRemoveKey,key)
keyList = append(keyList,key)
if len(keyList) >= batchRemoveNum {
break
}
}
keyList := make([]uint64, 0, batchRemoveNum)
for key := range mapRemoveKey {
delete(mapRemoveKey, key)
keyList = append(keyList, key)
if len(keyList) >= batchRemoveNum {
break
}
}
mp.removeRankData(rankId,keyList)
mp.removeRankData(rankId, keyList)
//如果删完删掉rankid下所有
if len(mapRemoveKey) == 0 {
delete(mapRemoveRankData,rankId)
}
if len(mapRemoveKey) == 0 {
delete(mapRemoveRankData, rankId)
}
}
}
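removeRankDataToDB drains the pending-delete map in chunks of batchRemoveNum keys and drops a rank id once its key set is empty. removeToDB itself is not shown in this diff, so the comment about a $in delete below is only an assumption about what it might issue; removeKeys is a stand-in.

package main

import "fmt"

const batchRemoveNum = 128 // maximum keys removed per batch

// removeKeys stands in for removeToDB; the real module presumably issues something
// like DeleteMany(ctx, bson.M{"_id": bson.M{"$in": keys}}) (assumption, not shown in the diff).
func removeKeys(rankId uint64, keys []uint64) {
	fmt.Printf("rank %d: delete %d keys\n", rankId, len(keys))
}

func removeRankData(mapRemove map[uint64]map[uint64]struct{}) {
	for rankId, mapRemoveKey := range mapRemove {
		keyList := make([]uint64, 0, batchRemoveNum)
		for key := range mapRemoveKey {
			delete(mapRemoveKey, key)
			keyList = append(keyList, key)
			if len(keyList) >= batchRemoveNum {
				break // cap this batch, leave the rest for the next pass
			}
		}
		removeKeys(rankId, keyList)
		if len(mapRemoveKey) == 0 {
			delete(mapRemove, rankId) // this rank is fully drained
		}
	}
}

func main() {
	pending := map[uint64]map[uint64]struct{}{1: {10: {}, 11: {}}}
	for len(pending) > 0 { // mirrors the outer loop in saveToDB
		removeRankData(pending)
	}
}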

View File

@@ -1,9 +1,9 @@
package rankservice
import (
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/util/algorithms/skip"
"github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/algorithms/skip"
"github.com/duanhf2012/origin/v2/util/sync"
)
var emptyRankData RankData
@@ -19,7 +19,7 @@ type RankData struct {
Data []byte
ExData []int64
refreshTimestamp int64 //刷新时间
RefreshTimestamp int64 //刷新时间
//bRelease bool
ref bool
compareFunc func(other skip.Comparator) int
@@ -39,7 +39,7 @@ func NewRankData(isDec bool, data *rpc.RankData,refreshTimestamp int64) *RankDat
ret.ExData = append(ret.ExData,d.InitValue+d.IncreaseValue)
}
ret.refreshTimestamp = refreshTimestamp
ret.RefreshTimestamp = refreshTimestamp
return ret
}

View File

@@ -2,7 +2,7 @@ package rankservice
import (
"container/heap"
"github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/v2/util/sync"
"time"
)

View File

@@ -1,8 +1,8 @@
package rankservice
import (
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
)
type RankDataChangeType int8
@@ -11,19 +11,18 @@ type IRankSkip interface {
GetRankID() uint64
GetRankName() string
GetRankLen() uint64
UpsetRank(upsetData *rpc.RankData,refreshTimestamp int64,fromLoad bool) RankDataChangeType
UpsetRank(upsetData *rpc.RankData, refreshTimestamp int64, fromLoad bool) RankDataChangeType
}
type IRankModule interface {
service.IModule
OnSetupRank(manual bool,rankSkip *RankSkip) error //当完成安装排行榜对象时
OnStart() //服务开启时回调
OnSetupRank(manual bool, rankSkip *RankSkip) error //当完成安装排行榜对象时
OnStart() //服务开启时回调
OnEnterRank(rankSkip IRankSkip, enterData *RankData) //进入排行
OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) //离开排行
OnChangeRankData(rankSkip IRankSkip, changeData *RankData) //当排行数据变化时
OnStop(mapRankSkip map[uint64]*RankSkip) //服务结束时回调
OnStop(mapRankSkip map[uint64]*RankSkip) //服务结束时回调
}
type DefaultRankModule struct {

View File

@@ -4,9 +4,9 @@ import (
"fmt"
"time"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
)
const PreMapRankSkipLen = 10
@@ -45,7 +45,7 @@ func (rs *RankService) OnRelease() {
rs.rankModule.OnStop(rs.mapRankSkip)
}
// 安装排行模块
// SetupRankModule 安装排行模块
func (rs *RankService) SetupRankModule(rankModule IRankModule) {
rs.rankModule = rankModule
}

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"time"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/util/algorithms/skip"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/algorithms/skip"
)
type RankSkip struct {
@@ -250,7 +250,7 @@ func (rs *RankSkip) UpsetRank(upsetData *rpc.RankData, refreshTimestamp int64, f
//找到的情况对比排名数据是否有变化,无变化进行data更新,有变化则进行删除更新
if compareIsEqual(rankNode.SortData, upsetData.SortData) {
rankNode.Data = upsetData.GetData()
rankNode.refreshTimestamp = refreshTimestamp
rankNode.RefreshTimestamp = refreshTimestamp
if fromLoad == false {
rs.rankModule.OnChangeRankData(rs, rankNode)
@@ -342,7 +342,7 @@ func (rs *RankSkip) GetRankNodeData(findKey uint64) (*RankData, uint64) {
return rankNode, index + 1
}
// GetRankNodeDataByPos 获取,返回排名节点与名次
// GetRankNodeDataByRank 获取,返回排名节点与名次
func (rs *RankSkip) GetRankNodeDataByRank(rank uint64) (*RankData, uint64) {
rs.pickExpireKey()
rankNode := rs.skipList.ByPosition(rank - 1)
@@ -382,7 +382,7 @@ func (rs *RankSkip) GetRankKeyPrevToLimit(findKey, count uint64, result *rpc.Ran
return nil
}
// GetRankKeyPrevToLimit 获取key前count名的数据
// GetRankKeyNextToLimit 获取key前count名的数据
func (rs *RankSkip) GetRankKeyNextToLimit(findKey, count uint64, result *rpc.RankDataList) error {
if rs.GetRankLen() <= 0 {
return fmt.Errorf("rank[%d] no data", rs.rankId)
@@ -411,7 +411,7 @@ func (rs *RankSkip) GetRankKeyNextToLimit(findKey, count uint64, result *rpc.Ran
return nil
}
// GetRankList 获取排行榜数据,startPos开始的count个数据
// GetRankDataFromToLimit 获取排行榜数据,startPos开始的count个数据
func (rs *RankSkip) GetRankDataFromToLimit(startPos, count uint64, result *rpc.RankDataList) error {
if rs.GetRankLen() <= 0 {
//初始排行榜可能没有数据

View File

@@ -2,16 +2,15 @@ package tcpservice
import (
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/network/processor"
"github.com/duanhf2012/origin/node"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/util/bytespool"
"runtime"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/network/processor"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/bytespool"
"github.com/google/uuid"
"strings"
"sync"
"sync/atomic"
"time"
)
@@ -20,105 +19,82 @@ type TcpService struct {
service.Service
mapClientLocker sync.RWMutex
mapClient map[uint64] *Client
process processor.IProcessor
mapClient map[string]*Client
process processor.IProcessor
}
type TcpPackType int8
const(
TPT_Connected TcpPackType = 0
TPT_DisConnected TcpPackType = 1
TPT_Pack TcpPackType = 2
TPT_UnknownPack TcpPackType = 3
)
const (
MaxNodeId = 1<<14 - 1 //最大值 16383
MaxSeed = 1<<19 - 1 //最大值 524287
MaxTime = 1<<31 - 1 //最大值 2147483647
TPT_Connected TcpPackType = 0
TPT_DisConnected TcpPackType = 1
TPT_Pack TcpPackType = 2
TPT_UnknownPack TcpPackType = 3
)
var seed uint32
type TcpPack struct {
Type TcpPackType //0表示连接 1表示断开 2表示数据
ClientId uint64
Data interface{}
Type TcpPackType //0表示连接 1表示断开 2表示数据
ClientId string
Data interface{}
}
type Client struct {
id uint64
tcpConn *network.TCPConn
id string
tcpConn *network.NetConn
tcpService *TcpService
}
func (tcpService *TcpService) genId() uint64 {
if node.GetNodeId()>MaxNodeId{
panic("nodeId exceeds the maximum!")
}
newSeed := atomic.AddUint32(&seed,1) % MaxSeed
nowTime := uint64(time.Now().Unix())%MaxTime
return (uint64(node.GetNodeId())<<50)|(nowTime<<19)|uint64(newSeed)
}
func GetNodeId(agentId uint64) int {
return int(agentId>>50)
}
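The genId removed above packed a 14-bit node id, a 31-bit unix timestamp, and a 19-bit seed into one uint64 (nodeId<<50 | time<<19 | seed), and GetNodeId reversed it with a right shift; the new code replaces this with UUID strings. A standalone sketch of the old packing, using a hypothetical node id value since node.GetNodeId() is no longer referenced here:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const (
	MaxNodeId = 1<<14 - 1 // 16383
	MaxSeed   = 1<<19 - 1 // 524287
	MaxTime   = 1<<31 - 1 // 2147483647
)

var seed uint32

// genId mirrors the removed scheme: 14-bit node id | 31-bit unix time | 19-bit seed.
func genId(nodeId uint64) uint64 {
	if nodeId > MaxNodeId {
		panic("nodeId exceeds the maximum!")
	}
	newSeed := atomic.AddUint32(&seed, 1) % MaxSeed
	nowTime := uint64(time.Now().Unix()) % MaxTime
	return (nodeId << 50) | (nowTime << 19) | uint64(newSeed)
}

// getNodeId recovers the node id from the high 14 bits.
func getNodeId(agentId uint64) int { return int(agentId >> 50) }

func main() {
	id := genId(7) // 7 is a hypothetical node id
	fmt.Printf("id=%d nodeId=%d\n", id, getNodeId(id))
}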
func (tcpService *TcpService) OnInit() error{
func (tcpService *TcpService) OnInit() error {
iConfig := tcpService.GetServiceCfg()
if iConfig == nil {
return fmt.Errorf("%s service config is error!", tcpService.GetName())
return fmt.Errorf("%s service config is error", tcpService.GetName())
}
tcpCfg := iConfig.(map[string]interface{})
addr,ok := tcpCfg["ListenAddr"]
addr, ok := tcpCfg["ListenAddr"]
if ok == false {
return fmt.Errorf("%s service config is error!", tcpService.GetName())
return fmt.Errorf("%s service config is error", tcpService.GetName())
}
tcpService.tcpServer.Addr = addr.(string)
MaxConnNum,ok := tcpCfg["MaxConnNum"]
MaxConnNum, ok := tcpCfg["MaxConnNum"]
if ok == true {
tcpService.tcpServer.MaxConnNum = int(MaxConnNum.(float64))
}
PendingWriteNum,ok := tcpCfg["PendingWriteNum"]
PendingWriteNum, ok := tcpCfg["PendingWriteNum"]
if ok == true {
tcpService.tcpServer.PendingWriteNum = int(PendingWriteNum.(float64))
}
LittleEndian,ok := tcpCfg["LittleEndian"]
LittleEndian, ok := tcpCfg["LittleEndian"]
if ok == true {
tcpService.tcpServer.LittleEndian = LittleEndian.(bool)
}
LenMsgLen,ok := tcpCfg["LenMsgLen"]
LenMsgLen, ok := tcpCfg["LenMsgLen"]
if ok == true {
tcpService.tcpServer.LenMsgLen = int(LenMsgLen.(float64))
}
MinMsgLen,ok := tcpCfg["MinMsgLen"]
MinMsgLen, ok := tcpCfg["MinMsgLen"]
if ok == true {
tcpService.tcpServer.MinMsgLen = uint32(MinMsgLen.(float64))
}
MaxMsgLen,ok := tcpCfg["MaxMsgLen"]
MaxMsgLen, ok := tcpCfg["MaxMsgLen"]
if ok == true {
tcpService.tcpServer.MaxMsgLen = uint32(MaxMsgLen.(float64))
}
readDeadline,ok := tcpCfg["ReadDeadline"]
readDeadline, ok := tcpCfg["ReadDeadline"]
if ok == true {
tcpService.tcpServer.ReadDeadline = time.Second*time.Duration(readDeadline.(float64))
tcpService.tcpServer.ReadDeadline = time.Second * time.Duration(readDeadline.(float64))
}
writeDeadline,ok := tcpCfg["WriteDeadline"]
writeDeadline, ok := tcpCfg["WriteDeadline"]
if ok == true {
tcpService.tcpServer.WriteDeadline = time.Second*time.Duration(writeDeadline.(float64))
tcpService.tcpServer.WriteDeadline = time.Second * time.Duration(writeDeadline.(float64))
}
tcpService.mapClient = make( map[uint64] *Client, tcpService.tcpServer.MaxConnNum)
tcpService.mapClient = make(map[string]*Client, tcpService.tcpServer.MaxConnNum)
tcpService.tcpServer.NewAgent = tcpService.NewClient
tcpService.tcpServer.Start()
return nil
return tcpService.tcpServer.Start()
}
func (tcpService *TcpService) TcpEventHandler(ev event.IEvent) {
@@ -129,148 +105,138 @@ func (tcpService *TcpService) TcpEventHandler(ev event.IEvent) {
case TPT_DisConnected:
tcpService.process.DisConnectedRoute(pack.ClientId)
case TPT_UnknownPack:
tcpService.process.UnknownMsgRoute(pack.ClientId,pack.Data)
tcpService.process.UnknownMsgRoute(pack.ClientId, pack.Data, tcpService.recyclerReaderBytes)
case TPT_Pack:
tcpService.process.MsgRoute(pack.ClientId,pack.Data)
tcpService.process.MsgRoute(pack.ClientId, pack.Data, tcpService.recyclerReaderBytes)
}
}
func (tcpService *TcpService) SetProcessor(process processor.IProcessor,handler event.IEventHandler){
tcpService.process = process
tcpService.RegEventReceiverFunc(event.Sys_Event_Tcp,handler, tcpService.TcpEventHandler)
func (tcpService *TcpService) recyclerReaderBytes(data []byte) {
}
func (tcpService *TcpService) NewClient(conn *network.TCPConn) network.Agent {
func (tcpService *TcpService) SetProcessor(process processor.IProcessor, handler event.IEventHandler) {
tcpService.process = process
tcpService.RegEventReceiverFunc(event.Sys_Event_Tcp, handler, tcpService.TcpEventHandler)
}
func (tcpService *TcpService) NewClient(conn network.Conn) network.Agent {
tcpService.mapClientLocker.Lock()
defer tcpService.mapClientLocker.Unlock()
for {
clientId := tcpService.genId()
_,ok := tcpService.mapClient[clientId]
if ok == true {
continue
}
uuId, _ := uuid.NewUUID()
clientId := strings.ReplaceAll(uuId.String(), "-", "")
pClient := &Client{tcpConn: conn.(*network.NetConn), id: clientId}
pClient.tcpService = tcpService
tcpService.mapClient[clientId] = pClient
pClient := &Client{tcpConn:conn, id:clientId}
pClient.tcpService = tcpService
tcpService.mapClient[clientId] = pClient
return pClient
}
return nil
return pClient
}
func (slf *Client) GetId() uint64 {
func (slf *Client) GetId() string {
return slf.id
}
func (slf *Client) Run() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
log.StackError(fmt.Sprint(r))
}
}()
slf.tcpService.NotifyEvent(&event.Event{Type:event.Sys_Event_Tcp,Data:TcpPack{ClientId:slf.id,Type:TPT_Connected}})
for{
slf.tcpService.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPT_Connected}})
for {
if slf.tcpConn == nil {
break
}
slf.tcpConn.SetReadDeadline(slf.tcpService.tcpServer.ReadDeadline)
bytes,err := slf.tcpConn.ReadMsg()
bytes, err := slf.tcpConn.ReadMsg()
if err != nil {
log.Debug("read client failed",log.ErrorAttr("error",err),log.Uint64("clientId",slf.id))
log.Debug("read client failed", log.ErrorField("error", err), log.String("clientId", slf.id))
break
}
data,err:=slf.tcpService.process.Unmarshal(slf.id,bytes)
data, err := slf.tcpService.process.Unmarshal(slf.id, bytes)
if err != nil {
slf.tcpService.NotifyEvent(&event.Event{Type:event.Sys_Event_Tcp,Data:TcpPack{ClientId:slf.id,Type:TPT_UnknownPack,Data:bytes}})
slf.tcpService.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPT_UnknownPack, Data: bytes}})
continue
}
slf.tcpService.NotifyEvent(&event.Event{Type:event.Sys_Event_Tcp,Data:TcpPack{ClientId:slf.id,Type:TPT_Pack,Data:data}})
slf.tcpService.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPT_Pack, Data: data}})
}
}
func (slf *Client) OnClose(){
slf.tcpService.NotifyEvent(&event.Event{Type:event.Sys_Event_Tcp,Data:TcpPack{ClientId:slf.id,Type:TPT_DisConnected}})
func (slf *Client) OnClose() {
slf.tcpService.NotifyEvent(&event.Event{Type: event.Sys_Event_Tcp, Data: TcpPack{ClientId: slf.id, Type: TPT_DisConnected}})
slf.tcpService.mapClientLocker.Lock()
defer slf.tcpService.mapClientLocker.Unlock()
delete (slf.tcpService.mapClient,slf.GetId())
delete(slf.tcpService.mapClient, slf.GetId())
}
func (tcpService *TcpService) SendMsg(clientId uint64,msg interface{}) error{
func (tcpService *TcpService) SendMsg(clientId string, msg interface{}) error {
tcpService.mapClientLocker.Lock()
client,ok := tcpService.mapClient[clientId]
if ok == false{
client, ok := tcpService.mapClient[clientId]
if ok == false {
tcpService.mapClientLocker.Unlock()
return fmt.Errorf("client %d is disconnect!",clientId)
return fmt.Errorf("client %s is disconnect", clientId)
}
tcpService.mapClientLocker.Unlock()
bytes,err := tcpService.process.Marshal(clientId,msg)
bytes, err := tcpService.process.Marshal(clientId, msg)
if err != nil {
return err
}
return client.tcpConn.WriteMsg(bytes)
}
func (tcpService *TcpService) Close(clientId uint64) {
func (tcpService *TcpService) Close(clientId string) {
tcpService.mapClientLocker.Lock()
defer tcpService.mapClientLocker.Unlock()
client,ok := tcpService.mapClient[clientId]
if ok == false{
client, ok := tcpService.mapClient[clientId]
if ok == false {
return
}
if client.tcpConn!=nil {
if client.tcpConn != nil {
client.tcpConn.Close()
}
return
}
func (tcpService *TcpService) GetClientIp(clientid uint64) string{
func (tcpService *TcpService) GetClientIp(clientId string) string {
tcpService.mapClientLocker.Lock()
defer tcpService.mapClientLocker.Unlock()
pClient,ok := tcpService.mapClient[clientid]
if ok == false{
pClient, ok := tcpService.mapClient[clientId]
if ok == false {
return ""
}
return pClient.tcpConn.GetRemoteIp()
}
func (tcpService *TcpService) SendRawMsg(clientId uint64,msg []byte) error{
func (tcpService *TcpService) SendRawMsg(clientId string, msg []byte) error {
tcpService.mapClientLocker.Lock()
client,ok := tcpService.mapClient[clientId]
if ok == false{
client, ok := tcpService.mapClient[clientId]
if ok == false {
tcpService.mapClientLocker.Unlock()
return fmt.Errorf("client %d is disconnect!",clientId)
return fmt.Errorf("client %s is disconnect", clientId)
}
tcpService.mapClientLocker.Unlock()
return client.tcpConn.WriteMsg(msg)
}
func (tcpService *TcpService) SendRawData(clientId uint64,data []byte) error{
func (tcpService *TcpService) SendRawData(clientId string, data []byte) error {
tcpService.mapClientLocker.Lock()
client,ok := tcpService.mapClient[clientId]
if ok == false{
client, ok := tcpService.mapClient[clientId]
if ok == false {
tcpService.mapClientLocker.Unlock()
return fmt.Errorf("client %d is disconnect!",clientId)
return fmt.Errorf("client %s is disconnect", clientId)
}
tcpService.mapClientLocker.Unlock()
return client.tcpConn.WriteRawMsg(data)
}
func (tcpService *TcpService) GetConnNum() int {
tcpService.mapClientLocker.Lock()
connNum := len(tcpService.mapClient)
@@ -278,14 +244,14 @@ func (tcpService *TcpService) GetConnNum() int {
return connNum
}
func (server *TcpService) SetNetMempool(mempool bytespool.IBytesMempool){
server.tcpServer.SetNetMempool(mempool)
func (tcpService *TcpService) SetNetMemPool(memPool bytespool.IBytesMemPool) {
tcpService.tcpServer.SetNetMemPool(memPool)
}
func (server *TcpService) GetNetMempool() bytespool.IBytesMempool {
return server.tcpServer.GetNetMempool()
func (tcpService *TcpService) GetNetMemPool() bytespool.IBytesMemPool {
return tcpService.tcpServer.GetNetMemPool()
}
func (server *TcpService) ReleaseNetMem(byteBuff []byte) {
server.tcpServer.GetNetMempool().ReleaseBytes(byteBuff)
func (tcpService *TcpService) ReleaseNetMem(byteBuff []byte) {
tcpService.tcpServer.GetNetMemPool().ReleaseBytes(byteBuff)
}

View File

@@ -2,100 +2,88 @@ package wsservice
import (
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/network/processor"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/node"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/network/processor"
"github.com/duanhf2012/origin/v2/service"
"github.com/google/uuid"
"strings"
"sync"
"sync/atomic"
"time"
)
type WSService struct {
service.Service
wsServer network.WSServer
mapClientLocker sync.RWMutex
mapClient map[uint64] *WSClient
mapClient map[string]*WSClient
process processor.IProcessor
}
var seed uint32
type WSPackType int8
const(
WPT_Connected WSPackType = 0
const (
WPT_Connected WSPackType = 0
WPT_DisConnected WSPackType = 1
WPT_Pack WSPackType = 2
WPT_UnknownPack WSPackType = 3
WPT_Pack WSPackType = 2
WPT_UnknownPack WSPackType = 3
)
const Default_WS_MaxConnNum = 3000
const Default_WS_PendingWriteNum = 10000
const Default_WS_MaxMsgLen = 65535
const (
MaxNodeId = 1<<14 - 1 //最大值 16383
MaxSeed = 1<<19 - 1 //最大值 524287
MaxTime = 1<<31 - 1 //最大值 2147483647
)
type WSClient struct {
id uint64
wsConn *network.WSConn
id string
wsConn *network.WSConn
wsService *WSService
}
type WSPack struct {
Type WSPackType //0表示连接 1表示断开 2表示数据
MsgProcessor processor.IProcessor
ClientId uint64
ClientId string
Data interface{}
}
func (ws *WSService) OnInit() error{
func (ws *WSService) OnInit() error {
iConfig := ws.GetServiceCfg()
if iConfig == nil {
return fmt.Errorf("%s service config is error!", ws.GetName())
return fmt.Errorf("%s service config is error", ws.GetName())
}
wsCfg := iConfig.(map[string]interface{})
addr,ok := wsCfg["ListenAddr"]
addr, ok := wsCfg["ListenAddr"]
if ok == false {
return fmt.Errorf("%s service config is error!", ws.GetName())
return fmt.Errorf("%s service config is error", ws.GetName())
}
ws.wsServer.Addr = addr.(string)
ws.wsServer.MaxConnNum = Default_WS_MaxConnNum
ws.wsServer.PendingWriteNum = Default_WS_PendingWriteNum
ws.wsServer.MaxMsgLen = Default_WS_MaxMsgLen
MaxConnNum,ok := wsCfg["MaxConnNum"]
MaxConnNum, ok := wsCfg["MaxConnNum"]
if ok == true {
ws.wsServer.MaxConnNum = int(MaxConnNum.(float64))
}
PendingWriteNum,ok := wsCfg["PendingWriteNum"]
PendingWriteNum, ok := wsCfg["PendingWriteNum"]
if ok == true {
ws.wsServer.PendingWriteNum = int(PendingWriteNum.(float64))
}
MaxMsgLen,ok := wsCfg["MaxMsgLen"]
MaxMsgLen, ok := wsCfg["MaxMsgLen"]
if ok == true {
ws.wsServer.MaxMsgLen = uint32(MaxMsgLen.(float64))
}
ws.mapClient = make( map[uint64] *WSClient, ws.wsServer.MaxConnNum)
ws.mapClient = make(map[string]*WSClient, ws.wsServer.MaxConnNum)
ws.wsServer.NewAgent = ws.NewWSClient
ws.wsServer.Start()
return nil
return ws.wsServer.Start()
}
func (ws *WSService) SetMessageType(messageType int){
func (ws *WSService) SetMessageType(messageType int) {
ws.wsServer.SetMessageType(messageType)
}
@@ -107,104 +95,90 @@ func (ws *WSService) WSEventHandler(ev event.IEvent) {
case WPT_DisConnected:
pack.MsgProcessor.DisConnectedRoute(pack.ClientId)
case WPT_UnknownPack:
pack.MsgProcessor.UnknownMsgRoute(pack.ClientId,pack.Data)
pack.MsgProcessor.UnknownMsgRoute(pack.ClientId, pack.Data, ws.recyclerReaderBytes)
case WPT_Pack:
pack.MsgProcessor.MsgRoute(pack.ClientId,pack.Data)
pack.MsgProcessor.MsgRoute(pack.ClientId, pack.Data, ws.recyclerReaderBytes)
}
}
func (ws *WSService) SetProcessor(process processor.IProcessor,handler event.IEventHandler){
func (ws *WSService) SetProcessor(process processor.IProcessor, handler event.IEventHandler) {
ws.process = process
ws.RegEventReceiverFunc(event.Sys_Event_WebSocket,handler, ws.WSEventHandler)
}
func (ws *WSService) genId() uint64 {
if node.GetNodeId()>MaxNodeId{
panic("nodeId exceeds the maximum!")
}
newSeed := atomic.AddUint32(&seed,1) % MaxSeed
nowTime := uint64(time.Now().Unix())%MaxTime
return (uint64(node.GetNodeId())<<50)|(nowTime<<19)|uint64(newSeed)
ws.RegEventReceiverFunc(event.Sys_Event_WebSocket, handler, ws.WSEventHandler)
}
func (ws *WSService) NewWSClient(conn *network.WSConn) network.Agent {
ws.mapClientLocker.Lock()
defer ws.mapClientLocker.Unlock()
for {
clientId := ws.genId()
_,ok := ws.mapClient[clientId]
if ok == true {
continue
}
pClient := &WSClient{wsConn:conn, id: clientId}
pClient.wsService = ws
ws.mapClient[clientId] = pClient
return pClient
}
uuId, _ := uuid.NewUUID()
clientId := strings.ReplaceAll(uuId.String(), "-", "")
pClient := &WSClient{wsConn: conn, id: clientId}
pClient.wsService = ws
ws.mapClient[clientId] = pClient
return pClient
return nil
}
func (slf *WSClient) GetId() uint64 {
func (slf *WSClient) GetId() string {
return slf.id
}
func (slf *WSClient) Run() {
slf.wsService.NotifyEvent(&event.Event{Type:event.Sys_Event_WebSocket,Data:&WSPack{ClientId:slf.id,Type:WPT_Connected,MsgProcessor:slf.wsService.process}})
for{
bytes,err := slf.wsConn.ReadMsg()
slf.wsService.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: slf.id, Type: WPT_Connected, MsgProcessor: slf.wsService.process}})
for {
bytes, err := slf.wsConn.ReadMsg()
if err != nil {
log.Debug("read client id %d is error:%+v",slf.id,err)
log.Debugf("read client id %s is error:%+v", slf.id, err)
break
}
data,err:=slf.wsService.process.Unmarshal(slf.id,bytes)
data, err := slf.wsService.process.Unmarshal(slf.id, bytes)
if err != nil {
slf.wsService.NotifyEvent(&event.Event{Type:event.Sys_Event_WebSocket,Data:&WSPack{ClientId:slf.id,Type:WPT_UnknownPack,Data:bytes,MsgProcessor:slf.wsService.process}})
slf.wsService.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: slf.id, Type: WPT_UnknownPack, Data: bytes, MsgProcessor: slf.wsService.process}})
continue
}
slf.wsService.NotifyEvent(&event.Event{Type:event.Sys_Event_WebSocket,Data:&WSPack{ClientId:slf.id,Type:WPT_Pack,Data:data,MsgProcessor:slf.wsService.process}})
slf.wsService.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: slf.id, Type: WPT_Pack, Data: data, MsgProcessor: slf.wsService.process}})
}
}
func (slf *WSClient) OnClose(){
slf.wsService.NotifyEvent(&event.Event{Type:event.Sys_Event_WebSocket,Data:&WSPack{ClientId:slf.id,Type:WPT_DisConnected,MsgProcessor:slf.wsService.process}})
func (slf *WSClient) OnClose() {
slf.wsService.NotifyEvent(&event.Event{Type: event.Sys_Event_WebSocket, Data: &WSPack{ClientId: slf.id, Type: WPT_DisConnected, MsgProcessor: slf.wsService.process}})
slf.wsService.mapClientLocker.Lock()
defer slf.wsService.mapClientLocker.Unlock()
delete (slf.wsService.mapClient,slf.GetId())
delete(slf.wsService.mapClient, slf.GetId())
}
func (ws *WSService) SendMsg(clientid uint64,msg interface{}) error{
func (ws *WSService) SendMsg(clientId string, msg interface{}) error {
ws.mapClientLocker.Lock()
client,ok := ws.mapClient[clientid]
if ok == false{
client, ok := ws.mapClient[clientId]
if ok == false {
ws.mapClientLocker.Unlock()
return fmt.Errorf("client %d is disconnect!",clientid)
return fmt.Errorf("client %s is disconnect", clientId)
}
ws.mapClientLocker.Unlock()
bytes,err := ws.process.Marshal(clientid,msg)
bytes, err := ws.process.Marshal(clientId, msg)
if err != nil {
return err
}
return client.wsConn.WriteMsg(bytes)
}
func (ws *WSService) Close(clientid uint64) {
func (ws *WSService) Close(clientId string) {
ws.mapClientLocker.Lock()
defer ws.mapClientLocker.Unlock()
client,ok := ws.mapClient[clientid]
if ok == false{
client, ok := ws.mapClient[clientId]
if ok == false {
return
}
if client.wsConn!=nil {
if client.wsConn != nil {
client.wsConn.Close()
}
return
}
func (ws *WSService) recyclerReaderBytes(data []byte) {
}

View File

@@ -9,7 +9,7 @@ import (
func NewAesEncrypt(key string) (aes *AesEncrypt, err error) {
keyLen := len(key)
if keyLen < 16 {
err = fmt.Errorf("The length of res key shall not be less than 16")
err = fmt.Errorf("the length of res key shall not be less than 16")
return
}
aes = &AesEncrypt{
@@ -22,12 +22,12 @@ type AesEncrypt struct {
StrKey string
}
func (this *AesEncrypt) getKey() []byte {
keyLen := len(this.StrKey)
func (ae *AesEncrypt) getKey() []byte {
keyLen := len(ae.StrKey)
if keyLen < 16 {
panic("The length of res key shall not be less than 16")
}
arrKey := []byte(this.StrKey)
arrKey := []byte(ae.StrKey)
if keyLen >= 32 {
//取前32个字节
return arrKey[:32]
@@ -40,37 +40,37 @@ func (this *AesEncrypt) getKey() []byte {
return arrKey[:16]
}
//加密字符串
func (this *AesEncrypt) Encrypt(strMesg string) ([]byte, error) {
key := this.getKey()
var iv = []byte(key)[:aes.BlockSize]
encrypted := make([]byte, len(strMesg))
// Encrypt 加密字符串
func (ae *AesEncrypt) Encrypt(str string) ([]byte, error) {
key := ae.getKey()
var iv = key[:aes.BlockSize]
encrypted := make([]byte, len(str))
aesBlockEncrypter, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
aesEncrypter := cipher.NewCFBEncrypter(aesBlockEncrypter, iv)
aesEncrypter.XORKeyStream(encrypted, []byte(strMesg))
aesEncrypter.XORKeyStream(encrypted, []byte(str))
return encrypted, nil
}
//解密字符串
func (this *AesEncrypt) Decrypt(src []byte) (strDesc string, err error) {
// Decrypt 解密字符串
func (ae *AesEncrypt) Decrypt(src []byte) (strDesc string, err error) {
defer func() {
//错误处理
if e := recover(); e != nil {
err = e.(error)
}
}()
key := this.getKey()
var iv = []byte(key)[:aes.BlockSize]
key := ae.getKey()
var iv = key[:aes.BlockSize]
decrypted := make([]byte, len(src))
var aesBlockDecrypter cipher.Block
aesBlockDecrypter, err = aes.NewCipher([]byte(key))
aesBlockDecrypter, err = aes.NewCipher(key)
if err != nil {
return "", err
}
aesDecrypter := cipher.NewCFBDecrypter(aesBlockDecrypter, iv)
aesDecrypter.XORKeyStream(decrypted, src)
return string(decrypted), nil
}
}
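The AesEncrypt changes above are mostly receiver renames and lower-cased error strings; the scheme itself is unchanged: the key's first aes.BlockSize bytes double as the IV, Encrypt XORs the plaintext through NewCFBEncrypter, and Decrypt reverses it with NewCFBDecrypter. A self-contained round-trip sketch of the same scheme using only the standard library (note that reusing the key prefix as a fixed IV, as here, is weaker than a random per-message IV):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

// encryptCFB mirrors AesEncrypt.Encrypt: the first BlockSize bytes of the key act as the IV.
func encryptCFB(key []byte, plain string) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	iv := key[:aes.BlockSize]
	out := make([]byte, len(plain))
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(out, []byte(plain))
	return out, nil
}

// decryptCFB mirrors AesEncrypt.Decrypt with the same key-derived IV.
func decryptCFB(key []byte, src []byte) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	iv := key[:aes.BlockSize]
	out := make([]byte, len(src))
	cipher.NewCFBDecrypter(block, iv).XORKeyStream(out, src)
	return string(out), nil
}

func main() {
	key := []byte("0123456789abcdef") // 16-byte key, the minimum length the module accepts
	enc, _ := encryptCFB(key, "hello origin")
	dec, _ := decryptCFB(key, enc)
	fmt.Println(dec)
}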

Some files were not shown because too many files have changed in this diff.