Compare commits

...

164 Commits

Author SHA1 Message Date
boyce
d1935b1bbc Optimize logging 2024-05-27 18:09:37 +08:00
boyce
90d54bf3e2 1. Add service and Global config interfaces, supporting parsing into structs
2. Optimize logging
2024-05-15 10:06:50 +08:00
boyce
78cc33c84e Optimize config reading for service templates 2024-05-11 15:11:24 +08:00
boyce
9cf21bf418 1. Add template services
2. Optimize the message queue
3. Optimize related logging
2024-05-11 14:42:34 +08:00
boyce
c6d0bd9a19 Optimize the gin module 2024-05-08 14:18:39 +08:00
boyce
61bf95e457 Optimize RankService 2024-05-07 18:57:38 +08:00
boyce
8b2a551ee5 Optimize the gin module 2024-05-06 10:51:51 +08:00
boyce
927c2ffa37 Optimize the gin module 2024-05-06 10:36:04 +08:00
boyce
b23b30aac5 Optimize service discovery 2024-04-30 19:05:44 +08:00
boyce
03f8ba0316 Simplify event notifications 2024-04-30 17:33:34 +08:00
boyce
277480a7f0 Merge branch 'v2' of https://github.com/duanhf2012/origin into v2 2024-04-29 09:24:04 +08:00
boyce
647a654a36 Add and improve documentation 2024-04-29 09:23:54 +08:00
boyce
de483a88f1 Optimize server shutdown 2024-04-28 18:02:20 +08:00
boyce
bbbb511b5f Add kafka module 2024-04-28 11:21:59 +08:00
boyce
0489ee3ef4 Add gin module & optimize the pb processor 2024-04-28 11:13:46 +08:00
boyce
692dacda0c Remove mongomodule, replaced by mongodbmodule 2024-04-28 10:40:56 +08:00
boyce
7f86b1007d Add support for a custom logger 2024-04-26 18:13:41 +08:00
boyce
ba5e30ae2e Add readme notes 2024-04-26 15:07:42 +08:00
boyce
0b1a1d2283 Merge branch 'master' into v2 2024-04-26 14:56:28 +08:00
origin
7780947a96 Merge pull request #878 from hjdtpz/master
Export RankData.RefreshTimestamp
2024-04-26 14:48:47 +08:00
boyce
d31b1ac657 Update readme and dependency versions 2024-04-26 14:45:04 +08:00
boyce
27723bf684 Add v2 release notes 2024-04-26 14:02:59 +08:00
boyce
3f45b19bab Optimize the clientid of wsservice and tcpservice 2024-04-26 10:45:58 +08:00
boyce
94b4572c2f Update go.mod 2024-04-26 09:53:51 +08:00
boyce
1d970de0f9 Optimize etcd service discovery 2024-04-25 18:27:47 +08:00
boyce
81625635ba Optimize logging 2024-04-25 17:04:42 +08:00
boyce
c6ade7d3e1 Optimize the origin default service discovery 2024-04-25 16:36:10 +08:00
boyce
bba5eb2929 Optimize service discovery in origin default mode 2024-04-25 11:20:06 +08:00
boyce
59efc05d24 Add rpc nats module 2024-04-23 10:44:21 +08:00
hjdtpz
4c169cf0bb Export RankData.RefreshTimestamp 2024-04-22 21:47:49 +08:00
boyce
e36693eeff Optimize the etcd discovery service, support recording additional rpc info 2024-04-19 16:39:57 +08:00
boyce
a26210f17f Add etcd service discovery 2024-04-18 18:50:12 +08:00
boyce
0cf935ffa4 Change nodeid to a string 2024-04-10 17:30:52 +08:00
boyce
161a67c8a1 Commit initial v2 version 2024-04-09 16:40:13 +08:00
boyce
96d02c8f71 create new version 2024-04-09 15:33:06 +08:00
boyce
3a56282a0b Optimize the httpclient module 2024-03-19 09:00:20 +08:00
boyce
eebbef52c9 Optimize the duplicate-startup check 2024-03-08 10:32:32 +08:00
boyce
2ddc54f5ac Optimize node startup 2024-03-05 17:38:02 +08:00
boyce
75ef7302de Merge branch 'master' of https://github.com/duanhf2012/origin 2024-03-01 17:47:08 +08:00
boyce
ecea9d1706 Optimize process startup 2024-03-01 17:35:32 +08:00
boyce
4898116698 Optimize concurrent, forbid duplicate open 2024-02-27 16:18:51 +08:00
boyce
bcbee6dd11 Improve the readme 2023-11-20 13:32:34 +08:00
duanhf2012
43122190a3 Add the GetNodeInfo interface 2023-11-15 08:48:41 +08:00
duanhf2012
39b862e3d9 Add IsNodeRetire function to check whether a NodeId is in the Retire state 2023-10-09 16:57:05 +08:00
duanhf2012
8c9b796fce Optimize log output 2023-10-09 16:50:09 +08:00
duanhf2012
c0971a46a7 Add retire command-line parameter to put services into the retired state for a soft shutdown 2023-10-09 15:27:01 +08:00
duanhf2012
ba019ac466 Improve the service discovery README documentation 2023-09-24 13:53:18 +08:00
duanhf2012
c803b9b9ad Optimize tcpservice 2023-09-22 16:29:31 +08:00
duanhf2012
3f52ea8331 Optimize filtered service discovery: support filtering against a specified master node 2023-09-22 15:43:41 +08:00
duanhf2012
2d1bee4dea Optimize log output format 2023-09-01 17:44:59 +08:00
duanhf2012
fa8cbfb40e Fix rpc exception handling 2023-08-29 18:23:20 +08:00
duanhf2012
388b946401 Optimize the log ErrorAttr 2023-08-24 11:34:43 +08:00
duanhf2012
582a0faa6f Replace gogoprotobuf with the Google standard library 2023-08-21 15:44:55 +08:00
duanhf2012
fa6039e2cb Optimize async logging 2023-08-17 15:54:36 +08:00
duanhf2012
25a672ca53 Further optimize async logging 2023-08-17 15:35:22 +08:00
duanhf2012
75f881be28 Add async logging feature 2023-08-17 14:00:36 +08:00
duanhf2012
ef8182eec7 Replace the logging library with slog 2023-08-15 15:46:38 +08:00
duanhf2012
4ad8204fde Optimize the module package name 2023-08-04 17:21:23 +08:00
duanhf2012
8f15546fb1 Tidy README formatting 2023-08-01 11:12:54 +08:00
duanhf2012
0f3a965d73 Add and improve README notes for the rpc timeout feature 2023-08-01 11:05:51 +08:00
duanhf2012
dfb6959843 Optimize the message queue default push timeout 2023-07-28 17:43:39 +08:00
duanhf2012
dd4aaf9c57 Optimize rpc timeout 2023-07-28 17:38:52 +08:00
duanhf2012
6ef98a2104 Optimize the newly added Rpc compression and custom timeout features 2023-07-25 17:36:47 +08:00
duanhf2012
1890b300ee Optimize the newly added rpc compression feature 2023-07-25 15:13:58 +08:00
duanhf2012
6fea2226e1 Optimize the protocol parser 2023-07-21 17:03:15 +08:00
duanhf2012
ec1c2b4517 node supports rpc compression 2023-07-21 15:28:52 +08:00
duanhf2012
4b84d9a1d5 Add custom rpc timeout 2023-07-13 16:42:23 +08:00
duanhf2012
85a8ec58e5 Add compression to rpc 2023-07-10 14:22:23 +08:00
duanhf2012
962016d476 Optimize rpc 2023-07-07 13:50:57 +08:00
duanhf2012
a61979e985 Optimize message queue service persistence 2023-05-17 11:26:29 +08:00
duanhf2012
6de25d1c6d Optimize rankservice error returns 2023-05-09 14:34:33 +08:00
duanhf2012
b392617d6e Optimize performance monitoring and rankservice persistence 2023-05-09 14:06:17 +08:00
duanhf2012
92fdb7860c Optimize service rpc within the local node 2023-05-04 17:53:42 +08:00
duanhf2012
f78d0d58be Optimize rpc and rankservice persistence 2023-05-04 17:35:40 +08:00
duanhf2012
5675681ab1 Optimize the concurrent and rpc modules 2023-05-04 14:21:29 +08:00
duanhf2012
ddeaaf7d77 Optimize the concurrent module 2023-04-11 10:29:06 +08:00
duanhf2012
1174b47475 Extend the IService interface with IConcurrent 2023-04-04 16:36:05 +08:00
duanhf2012
18fff3b567 Optimize the concurrent module, add a return value controlling whether the callback fires 2023-03-31 15:12:27 +08:00
duanhf2012
7ab6c88f9c Tidy and optimize rpc 2023-03-23 10:06:41 +08:00
duanhf2012
6b64de06a2 Add packet length field configuration for TcpService 2023-03-22 14:59:22 +08:00
duanhf2012
95b153f8cf Optimize automatic calculation of the network packet length field 2023-03-20 15:20:04 +08:00
duanhf2012
f3ff09b90f Optimize rpc call error logging
Require configured services to be installed
Optimize node removal when a node disconnects
2023-03-17 12:09:00 +08:00
duanhf2012
f9738fb9d0 Merge branch 'master' of https://github.com/duanhf2012/origin 2023-03-06 15:57:33 +08:00
duanhf2012
91e773aa8c Document that the RPC function naming convention supports RPCFunctionName 2023-03-06 15:57:23 +08:00
origin
c9b96404f4 Merge pull request #873 from duanhf2012/dependabot/go_modules/golang.org/x/crypto-0.1.0
Bump golang.org/x/crypto from 0.0.0-20201216223049-8b5274cf687f to 0.1.0
2023-03-06 15:45:40 +08:00
duanhf2012
aaae63a674 Add support for RPC function names in the RPCXXX format 2023-03-06 15:41:51 +08:00
duanhf2012
47dc21aee1 Report an error when rpc reply and request parameters do not match 2023-03-06 11:47:23 +08:00
dependabot[bot]
4d09532801 Bump golang.org/x/crypto from 0.0.0-20201216223049-8b5274cf687f to 0.1.0
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.0.0-20201216223049-8b5274cf687f to 0.1.0.
- [Release notes](https://github.com/golang/crypto/releases)
- [Commits](https://github.com/golang/crypto/commits/v0.1.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-06 02:26:33 +00:00
origin
d3ad7fc898 Merge pull request #871 from duanhf2012/dependabot/go_modules/golang.org/x/text-0.3.8
Bump golang.org/x/text from 0.3.6 to 0.3.8
2023-03-06 10:08:56 +08:00
dependabot[bot]
ba2b0568b2 Bump golang.org/x/text from 0.3.6 to 0.3.8
Bumps [golang.org/x/text](https://github.com/golang/text) from 0.3.6 to 0.3.8.
- [Release notes](https://github.com/golang/text/releases)
- [Commits](https://github.com/golang/text/compare/v0.3.6...v0.3.8)

---
updated-dependencies:
- dependency-name: golang.org/x/text
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-02-23 07:30:17 +00:00
duanhf2012
5a3600bd62 Document the install, start, and stop order of services 2023-02-22 15:43:56 +08:00
duanhf2012
4783d05e75 Optimize the concurrency module 2023-02-22 15:13:51 +08:00
duanhf2012
8cc1b1afcb Add notes on concurrent function calls 2023-02-22 10:49:01 +08:00
duanhf2012
53d9392901 Add "Concurrent Function Calls" documentation 2023-02-22 10:45:17 +08:00
duanhf2012
8111b12da5 Add async function execution feature 2023-02-22 09:53:50 +08:00
duanhf2012
0ebbe0e31d Optimize the service start/stop order 2023-02-16 15:59:07 +08:00
duanhf2012
e326e342f2 Add leaderboard extended data and data modification 2023-02-13 17:04:36 +08:00
duanhf2012
a7c6b45764 Optimize local-node and cross-node Rpc structure & simplify the raw Rpc interface 2023-01-31 13:50:41 +08:00
duanhf2012
541abd93b4 Merge branch 'master' of https://github.com/duanhf2012/origin 2023-01-29 10:16:23 +08:00
origin
8c8d681093 Optimize the message queue: increase the delay when there are no new messages 2023-01-28 16:25:30 +08:00
origin
b8150cfc51 Extend index sorting 2023-01-13 10:59:36 +08:00
origin
3833884777 Optimize the message queue 2022-12-29 14:54:54 +08:00
duanhf2012
60064cbba6 Optimize the network module 2022-12-27 14:20:18 +08:00
orgin
66770f07a5 Optimize the message queue 2022-12-09 12:42:47 +08:00
orgin
76c8541b34 Optimize message queue persistence logging 2022-12-01 14:19:02 +08:00
orgin
b1fee9bc57 Optimize message queue service logging 2022-11-29 17:25:34 +08:00
orgin
284d43dc71 Optimize rpc 2022-11-29 13:38:07 +08:00
orgin
fd43863b73 Optimize rpc 2022-11-29 09:50:27 +08:00
orgin
1fcd870f1d Optimize the message queue service 2022-11-26 16:15:41 +08:00
orgin
11b78f84c4 Optimize rpc 2022-11-26 14:36:03 +08:00
orgin
8c6ee24b16 Optimize rpc 2022-11-26 14:16:25 +08:00
orgin
ca23925796 Optimize the persistent storage module 2022-11-25 11:45:35 +08:00
orgin
afb04cac7f Optimize the leaderboard RankService, support adding leaderboards via the RPC interface 2022-11-23 17:22:58 +08:00
orgin
975cf93d58 Optimize the leaderboard RankService 2022-11-23 16:31:15 +08:00
orgin
c7e0fcbdbb Optimize the leaderboard RankService, add a mongodb persistence Module 2022-11-23 09:45:26 +08:00
orgin
5bea050f63 Optimize the RankService leaderboard service 2022-11-19 16:00:16 +08:00
orgin
95b4e2f8de Add leaderboard service 2022-11-18 15:45:20 +08:00
orgin
5601ab5ae2 Extend CountDocument condition parameters 2022-11-17 09:26:10 +08:00
orgin
d28094eefa Fix the cross-day data delay issue in message queue persistence 2022-11-16 11:19:36 +08:00
orgin
68dfbc46f0 Add message queue with custom persistence 2022-11-15 17:09:17 +08:00
orgin
80c73b0bdb Add method to get the http service url 2022-11-15 15:18:00 +08:00
orgin
d9afeed6ee Improve the error message when service initialization fails 2022-11-15 09:01:21 +08:00
orgin
a32ff59676 Optimize the binary search algorithm, add comments 2022-11-11 14:17:29 +08:00
orgin
2101c8903c Optimize a service's rpc calls to itself 2022-11-04 18:23:58 +08:00
orgin
5214f094bf Optimize service discovery logic 2022-10-28 14:17:37 +08:00
orgin
fd364cf579 Optimize RPC 2022-10-27 15:38:28 +08:00
orgin
1eab31209c Add readme notes on manually starting http listening in httpservice 2022-10-25 17:16:09 +08:00
orgin
2da3ccae39 Optimize a service's rpc calls to itself 2022-10-25 17:06:13 +08:00
orgin
da18cf3158 Optimize RPC calls within a service 2022-10-24 16:51:04 +08:00
orgin
c3484e9d5b Add ManualStart switch to httpservice to support starting listening manually 2022-10-24 11:03:32 +08:00
orgin
b87a78c85b Optimize the redis module 2022-10-19 13:47:57 +08:00
orgin
17a448f75c Optimize config reading rules, only allow .json configs 2022-10-13 14:29:49 +08:00
orgin
d87ad419c8 Upgrade to go1.19 2022-10-13 13:57:22 +08:00
orgin
298a5d3721 Optimize service config reading rules 2022-10-13 13:54:07 +08:00
orgin
64fb9368bf Optimize the low-level network write timeout 2022-10-13 10:39:29 +08:00
orgin
7f93aa5ff9 Add function to mark a specified index position in contiguous buffer memory 2022-09-27 16:49:11 +08:00
orgin
7a8d312aeb Fix MessageType not being set in WSService under certain service installation orders 2022-09-23 10:49:44 +08:00
orgin
f931f61f7b Add build flags and target OS type 2022-09-22 10:54:27 +08:00
orgin
151ed123f4 Optimize the WSClient MessageType 2022-09-22 10:50:24 +08:00
orgin
5a6a4c8a0d Add build flags and target OS type 2022-09-22 10:37:27 +08:00
orgin
280c04a5d7 Optimize the binary search algorithm 2022-09-08 09:18:18 +08:00
orgin
1520dae223 Replace the ioutil package with the os/io packages; it has been deprecated since 1.16 2022-08-17 14:28:01 +08:00
orgin
84f3429564 Add addition, subtraction, and multiplication helper functions 2022-08-08 15:28:21 +08:00
orgin
89fd5d273b Optimize the self-recovering goroutine function GoRecover 2022-07-15 20:25:51 +08:00
orgin
3ce873ef04 Optimize the interface for getting NodeId 2022-07-11 15:34:28 +08:00
orgin
3763f7d848 Tidy and optimize cluster 2022-07-11 10:55:57 +08:00
orgin
769f680b17 Add service discovery event listening 2022-07-07 13:38:38 +08:00
orgin
77988906f8 Optimize recording of rpc method names 2022-06-30 17:56:16 +08:00
orgin
ae0ba1d966 Optimize circular queue tests 2022-06-30 10:38:05 +08:00
orgin
f61fd5d1be Add circular queue 2022-06-30 09:50:32 +08:00
orgin
eb1867c5fd Extend IService with new interfaces 2022-06-29 11:15:22 +08:00
orgin
8823d5fba4 Extend service, add interface for a custom service event channel size 2022-06-28 17:33:03 +08:00
orgin
9945c29a5c Optimize the memory pool, report an error on recursive double-free 2022-06-27 19:46:21 +08:00
orgin
173d84f7e6 Optimize websocket 2022-06-23 10:07:50 +08:00
orgin
d687780517 Add general math functions 2022-06-15 14:30:20 +08:00
orgin
a8a030c0f5 Upgrade origin to 1.18 2022-06-15 11:13:38 +08:00
orgin
f8469ea10e Add algorithm module 2022-06-15 10:31:10 +08:00
orgin
be3daf19f9 Optimize the service config getter functions 2022-06-09 17:44:42 +08:00
orgin
aa91c7bf1b Merge pull request #860 from cgc1983/master
module exit error
2022-06-09 11:52:50 +08:00
cgc1983
7fe73e55fb module exit error 2022-06-09 11:32:25 +08:00
cgc1983
e5ceaa9e76 ignore macosx 2022-06-02 17:51:03 +08:00
orgin
97c55ada71 Optimize network connection Id generation rules 2022-06-02 17:07:01 +08:00
orgin
776b234022 Optimize network connection Id generation rules & optimize the WebSocket service 2022-06-02 16:09:16 +08:00
orgin
a4f425bd69 Sync jsonprocessor 2022-05-31 10:57:47 +08:00
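
Several of the commits above ("Change nodeid to a string", "Commit initial v2 version", "Update go.mod") mark the move to a v2 module; in the diffs below every import gains a /v2 path segment, following Go semantic import versioning. A minimal, hypothetical caller illustrating the change (the message and attribute values are invented):

package main

import (
	// v1 callers imported "github.com/duanhf2012/origin/log";
	// v2 adds the /v2 segment to the module path.
	"github.com/duanhf2012/origin/v2/log"
)

func main() {
	// Structured attributes such as log.String match the calls visible in the cluster diff below.
	log.Info("migrated to origin v2", log.String("NodeId", "node-1"))
}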
125 changed files with 16727 additions and 5713 deletions

.gitignore (vendored): 1 line changed

@@ -10,3 +10,4 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.DS_Store

README.md: 1079 lines changed (file diff suppressed because it is too large)


@@ -2,11 +2,14 @@ package cluster
import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"strings"
"sync"
"github.com/duanhf2012/origin/v2/event"
"errors"
"reflect"
)
var configDir = "./config/"
@@ -20,17 +23,25 @@ const (
Discard NodeStatus = 1 //discarded
)
type DiscoveryService struct {
MasterNodeId string //master node Id to filter on; unset or 0 means all master nodes
NetworkName string //for etcd, the network name whose services are filtered; unset means all networks
ServiceList []string //only these services are discovered
}
type NodeInfo struct {
NodeId int
NodeName string
NodeId string
Private bool
ListenAddr string
MaxRpcParamLen uint32 //maximum Rpc parameter length
ServiceList []string //all services
CompressBytesLen int //payloads longer than this many bytes are compressed
ServiceList []string //all services, in order
PublicServiceList []string //publicly exposed services
DiscoveryService []string //services to discover after filtering; no filtering if unset
NeighborService []string
DiscoveryService []DiscoveryService //services to discover after filtering; no filtering if unset
status NodeStatus
Retire bool
NetworkName string
}
type NodeRpcInfo struct {
@@ -42,18 +53,23 @@ var cluster Cluster
type Cluster struct {
localNodeInfo NodeInfo //local node configuration
masterDiscoveryNodeList []NodeInfo //configured discovery Master nodes
discoveryInfo DiscoveryInfo //service discovery configuration
rpcMode RpcMode
globalCfg interface{} //global configuration
localServiceCfg map[string]interface{} //map[serviceName]config data
mapRpc map[int]NodeRpcInfo //nodeId
serviceDiscovery IServiceDiscovery //service discovery interface
locker sync.RWMutex //lock protecting node/service relationships
mapIdNode map[int]NodeInfo //map[NodeId]NodeInfo
mapServiceNode map[string]map[int]struct{} //map[serviceName]map[NodeId]
mapRpc map[string]*NodeRpcInfo //nodeId
mapServiceNode map[string]map[string]struct{} //map[serviceName]map[NodeId]
mapTemplateServiceNode map[string]map[string]struct{} //map[templateServiceName]map[serviceName]nodeId
callSet rpc.CallSet
rpcNats rpc.RpcNats
rpcServer rpc.IServer
rpcServer rpc.Server
rpcEventLocker sync.RWMutex //lock protecting Rpc event listeners
mapServiceListenRpcEvent map[string]struct{} //ServiceName
}
@@ -70,81 +86,72 @@ func SetServiceDiscovery(serviceDiscovery IServiceDiscovery) {
cluster.serviceDiscovery = serviceDiscovery
}
func (cls *Cluster) Start() {
cls.rpcServer.Start(cls.localNodeInfo.ListenAddr, cls.localNodeInfo.MaxRpcParamLen)
func (cls *Cluster) Start() error{
return cls.rpcServer.Start()
}
func (cls *Cluster) Stop() {
cls.serviceDiscovery.OnNodeStop()
cls.rpcServer.Stop()
}
func (cls *Cluster) DiscardNode(nodeId int) {
func (cls *Cluster) DiscardNode(nodeId string) {
cls.locker.Lock()
nodeInfo, ok := cls.mapIdNode[nodeId]
nodeInfo, ok := cls.mapRpc[nodeId]
bDel := (ok == true) && nodeInfo.nodeInfo.status == Discard
cls.locker.Unlock()
if ok == true && nodeInfo.status == Discard {
cls.DelNode(nodeId, true)
if bDel {
cls.DelNode(nodeId)
}
}
func (cls *Cluster) DelNode(nodeId int, immediately bool) {
func (cls *Cluster) DelNode(nodeId string) {
//MasterDiscover nodes and the local node are never deleted
if cls.GetMasterDiscoveryNodeInfo(nodeId) != nil || nodeId == cls.localNodeInfo.NodeId {
if cls.IsOriginMasterDiscoveryNode(nodeId) || nodeId == cls.localNodeInfo.NodeId {
return
}
cls.locker.Lock()
nodeInfo, ok := cls.mapIdNode[nodeId]
defer cls.locker.Unlock()
rpc, ok := cls.mapRpc[nodeId]
if ok == false {
cls.locker.Unlock()
return
}
rpc, ok := cls.mapRpc[nodeId]
for {
//delete immediately
if immediately || ok == false {
break
}
rpc.client.Lock()
//do not actively disconnect a node that is still connected; only drop ones that are not connected
if rpc.client.IsConnected() {
nodeInfo.status = Discard
rpc.client.Unlock()
cls.locker.Unlock()
log.SRelease("Discard node ", nodeInfo.NodeId, " ", nodeInfo.ListenAddr)
return
}
rpc.client.Unlock()
break
}
for _, serviceName := range nodeInfo.ServiceList {
cls.TriggerDiscoveryEvent(false,nodeId,rpc.nodeInfo.ServiceList)
for _, serviceName := range rpc.nodeInfo.ServiceList {
cls.delServiceNode(serviceName, nodeId)
}
delete(cls.mapIdNode, nodeId)
delete(cls.mapRpc, nodeId)
cls.locker.Unlock()
if ok == true {
rpc.client.Close(false)
}
log.SRelease("remove node ", nodeInfo.NodeId, " ", nodeInfo.ListenAddr)
log.Info("remove node ",log.String("NodeId", rpc.nodeInfo.NodeId),log.String("ListenAddr", rpc.nodeInfo.ListenAddr))
}
func (cls *Cluster) serviceDiscoveryDelNode(nodeId int, immediately bool) {
if nodeId == 0 {
func (cls *Cluster) serviceDiscoveryDelNode(nodeId string) {
cls.DelNode(nodeId)
}
func (cls *Cluster) delServiceNode(serviceName string, nodeId string) {
if nodeId == cls.localNodeInfo.NodeId {
return
}
cls.DelNode(nodeId, immediately)
}
//handle template services
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
func (cls *Cluster) delServiceNode(serviceName string, nodeId int) {
if nodeId == cls.localNodeInfo.NodeId {
return
mapService := cls.mapTemplateServiceNode[templateServiceName]
delete(mapService,serviceName)
if len(cls.mapTemplateServiceNode[templateServiceName]) == 0 {
delete(cls.mapTemplateServiceNode,templateServiceName)
}
}
mapNode := cls.mapServiceNode[serviceName]
@@ -164,66 +171,91 @@ func (cls *Cluster) serviceDiscoverySetNodeInfo(nodeInfo *NodeInfo) {
defer cls.locker.Unlock()
//first clear all services previously recorded for this NodeId
lastNodeInfo, ok := cls.mapIdNode[nodeInfo.NodeId]
lastNodeInfo, ok := cls.mapRpc[nodeInfo.NodeId]
if ok == true {
for _, serviceName := range lastNodeInfo.ServiceList {
for _, serviceName := range lastNodeInfo.nodeInfo.ServiceList {
cls.delServiceNode(serviceName, nodeInfo.NodeId)
}
}
cluster.TriggerDiscoveryEvent(true,nodeInfo.NodeId,nodeInfo.PublicServiceList)
//then rebuild
mapDuplicate := map[string]interface{}{} //guard against duplicate data
for _, serviceName := range nodeInfo.PublicServiceList {
if _, ok := mapDuplicate[serviceName]; ok == true {
//duplicate found
log.SError("Bad duplicate Service Cfg.")
log.Error("Bad duplicate Service Cfg.")
continue
}
mapDuplicate[serviceName] = nil
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[int]struct{}, 1)
//if it is a template service, record the template relationship
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
//record the template
if _, ok = cls.mapTemplateServiceNode[templateServiceName]; ok == false {
cls.mapTemplateServiceNode[templateServiceName]=map[string]struct{}{}
}
cls.mapTemplateServiceNode[templateServiceName][serviceName] = struct{}{}
}
if _, ok = cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[string]struct{}, 1)
}
cls.mapServiceNode[serviceName][nodeInfo.NodeId] = struct{}{}
}
cls.mapIdNode[nodeInfo.NodeId] = *nodeInfo
log.SRelease("Discovery nodeId: ", nodeInfo.NodeId, " services:", nodeInfo.PublicServiceList)
//a connection already exists, nothing to set up
if _, rpcInfoOK := cls.mapRpc[nodeInfo.NodeId]; rpcInfoOK == true {
if lastNodeInfo != nil {
log.Info("Discovery nodeId",log.String("NodeId", nodeInfo.NodeId),log.Any("services:", nodeInfo.PublicServiceList),log.Bool("Retire",nodeInfo.Retire))
lastNodeInfo.nodeInfo = *nodeInfo
return
}
//otherwise, establish a connection
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = *nodeInfo
rpcInfo.client = &rpc.Client{}
rpcInfo.client.TriggerRpcEvent = cls.triggerRpcEvent
rpcInfo.client.Connect(nodeInfo.NodeId, nodeInfo.ListenAddr, nodeInfo.MaxRpcParamLen)
cls.mapRpc[nodeInfo.NodeId] = rpcInfo
if cls.IsNatsMode() {
rpcInfo.client = cls.rpcNats.NewNatsClient(nodeInfo.NodeId, cls.GetLocalNodeInfo().NodeId,&cls.callSet,cls.NotifyAllService)
}else{
rpcInfo.client =rpc.NewRClient(nodeInfo.NodeId, nodeInfo.ListenAddr, nodeInfo.MaxRpcParamLen,cls.localNodeInfo.CompressBytesLen,&cls.callSet,cls.NotifyAllService)
}
cls.mapRpc[nodeInfo.NodeId] = &rpcInfo
if cls.IsNatsMode() == true || cls.discoveryInfo.discoveryType!=OriginType {
log.Info("Discovery nodeId and new rpc client",log.String("NodeId", nodeInfo.NodeId),log.Any("services:", nodeInfo.PublicServiceList),log.Bool("Retire",nodeInfo.Retire))
}else{
log.Info("Discovery nodeId and new rpc client",log.String("NodeId", nodeInfo.NodeId),log.Any("services:", nodeInfo.PublicServiceList),log.Bool("Retire",nodeInfo.Retire),log.String("nodeListenAddr",nodeInfo.ListenAddr))
}
}
func (cls *Cluster) buildLocalRpc() {
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = cls.localNodeInfo
rpcInfo.client = &rpc.Client{}
rpcInfo.client.Connect(rpcInfo.nodeInfo.NodeId, "", 0)
cls.mapRpc[cls.localNodeInfo.NodeId] = rpcInfo
}
func (cls *Cluster) Init(localNodeId int, setupServiceFun SetupServiceFun) error {
func (cls *Cluster) Init(localNodeId string, setupServiceFun SetupServiceFun) error {
//1. initialize configuration
err := cls.InitCfg(localNodeId)
if err != nil {
return err
}
cls.rpcServer.Init(cls)
cls.buildLocalRpc()
cls.callSet.Init()
if cls.IsNatsMode() {
cls.rpcNats.Init(cls.rpcMode.Nats.NatsUrl,cls.rpcMode.Nats.NoRandomize,cls.GetLocalNodeInfo().NodeId,cls.localNodeInfo.CompressBytesLen,cls,cluster.NotifyAllService)
cls.rpcServer = &cls.rpcNats
}else{
s := &rpc.Server{}
s.Init(cls.localNodeInfo.ListenAddr,cls.localNodeInfo.MaxRpcParamLen,cls.localNodeInfo.CompressBytesLen,cls)
cls.rpcServer = s
}
//2. set up the service discovery node
cls.SetupServiceDiscovery(localNodeId, setupServiceFun)
err = cls.setupDiscovery(localNodeId, setupServiceFun)
if err != nil {
log.Error("setupDiscovery fail",log.ErrorAttr("err",err))
return err
}
service.RegRpcEventFun = cls.RegRpcEvent
service.UnRegRpcEventFun = cls.UnRegRpcEvent
err = cls.serviceDiscovery.InitDiscovery(localNodeId, cls.serviceDiscoveryDelNode, cls.serviceDiscoverySetNodeInfo)
if err != nil {
@@ -233,74 +265,6 @@ func (cls *Cluster) Init(localNodeId int, setupServiceFun SetupServiceFun) error
return nil
}
func (cls *Cluster) checkDynamicDiscovery(localNodeId int) (bool, bool) {
var localMaster bool //whether this node is a Master node
var hasMaster bool //whether a Master service is configured
//iterate over all nodes
for _, nodeInfo := range cls.masterDiscoveryNodeList {
if nodeInfo.NodeId == localNodeId {
localMaster = true
}
hasMaster = true
}
//return the query result
return localMaster, hasMaster
}
func (cls *Cluster) appendService(serviceName string, bPublicService bool) {
cls.localNodeInfo.ServiceList = append(cls.localNodeInfo.ServiceList, serviceName)
if bPublicService {
cls.localNodeInfo.PublicServiceList = append(cls.localNodeInfo.PublicServiceList, serviceName)
}
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = map[int]struct{}{}
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
func (cls *Cluster) GetDiscoveryNodeList() []NodeInfo {
return cls.masterDiscoveryNodeList
}
func (cls *Cluster) GetMasterDiscoveryNodeInfo(nodeId int) *NodeInfo {
for i := 0; i < len(cls.masterDiscoveryNodeList); i++ {
if cls.masterDiscoveryNodeList[i].NodeId == nodeId {
return &cls.masterDiscoveryNodeList[i]
}
}
return nil
}
func (cls *Cluster) IsMasterDiscoveryNode() bool {
return cls.GetMasterDiscoveryNodeInfo(cls.GetLocalNodeInfo().NodeId) != nil
}
func (cls *Cluster) SetupServiceDiscovery(localNodeId int, setupServiceFun SetupServiceFun) {
if cls.serviceDiscovery != nil {
return
}
//1. if no DiscoveryNode config is present, fall back to discovery via the default config file
localMaster, hasMaster := cls.checkDynamicDiscovery(localNodeId)
if hasMaster == false {
cls.serviceDiscovery = &ConfigDiscovery{}
return
}
setupServiceFun(&masterService, &clientService)
//2. for dynamic service discovery, install the local discovery service
cls.serviceDiscovery = getDynamicDiscovery()
if localMaster == true {
cls.appendService(DynamicDiscoveryMasterName, false)
}
cls.appendService(DynamicDiscoveryClientName, true)
}
func (cls *Cluster) FindRpcHandler(serviceName string) rpc.IRpcHandler {
pService := service.GetService(serviceName)
if pService == nil {
@@ -310,73 +274,90 @@ func (cls *Cluster) FindRpcHandler(serviceName string) rpc.IRpcHandler {
return pService.GetRpcHandler()
}
func (cls *Cluster) getRpcClient(nodeId int) *rpc.Client {
func (cls *Cluster) getRpcClient(nodeId string) (*rpc.Client,bool) {
c, ok := cls.mapRpc[nodeId]
if ok == false {
return nil
return nil,false
}
return c.client
return c.client,c.nodeInfo.Retire
}
func (cls *Cluster) GetRpcClient(nodeId int) *rpc.Client {
func (cls *Cluster) GetRpcClient(nodeId string) (*rpc.Client,bool) {
cls.locker.RLock()
defer cls.locker.RUnlock()
return cls.getRpcClient(nodeId)
}
func GetRpcClient(nodeId int, serviceMethod string, clientList []*rpc.Client) (error, int) {
if nodeId > 0 {
pClient := GetCluster().GetRpcClient(nodeId)
func GetNodeIdByTemplateService(templateServiceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
return GetCluster().GetNodeIdByTemplateService(templateServiceName, rpcClientList, filterRetire)
}
func GetRpcClient(nodeId string, serviceMethod string,filterRetire bool, clientList []*rpc.Client) (error, []*rpc.Client) {
if nodeId != rpc.NodeIdNull {
pClient,retire := GetCluster().GetRpcClient(nodeId)
if pClient == nil {
return fmt.Errorf("cannot find nodeid %d!", nodeId), 0
return fmt.Errorf("cannot find nodeid %d!", nodeId), nil
}
clientList[0] = pClient
return nil, 1
//filter out retired nodes if requested
if filterRetire == true && retire == true {
return fmt.Errorf("cannot find nodeid %d!", nodeId), nil
}
clientList = append(clientList,pClient)
return nil, clientList
}
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
return fmt.Errorf("servicemethod param %s is error!", serviceMethod), 0
return fmt.Errorf("servicemethod param %s is error!", serviceMethod), nil
}
serviceName := serviceMethod[:findIndex]
//1. find the corresponding rpc NodeId
return GetCluster().GetNodeIdByService(serviceName, clientList, true)
return GetCluster().GetNodeIdByService(serviceName, clientList, filterRetire)
}
func GetRpcServer() *rpc.Server {
return &cluster.rpcServer
func GetRpcServer() rpc.IServer {
return cluster.rpcServer
}
func (cls *Cluster) IsNodeConnected(nodeId int) bool {
pClient := cls.GetRpcClient(nodeId)
func (cls *Cluster) IsNodeConnected(nodeId string) bool {
pClient,_ := cls.GetRpcClient(nodeId)
return pClient != nil && pClient.IsConnected()
}
func (cls *Cluster) triggerRpcEvent(bConnect bool, clientSeq uint32, nodeId int) {
cls.locker.Lock()
nodeInfo, ok := cls.mapRpc[nodeId]
if ok == false || nodeInfo.client == nil || nodeInfo.client.GetClientSeq() != clientSeq {
cls.locker.Unlock()
return
}
cls.locker.Unlock()
func (cls *Cluster) IsNodeRetire(nodeId string) bool {
cls.locker.RLock()
defer cls.locker.RUnlock()
_,retire :=cls.getRpcClient(nodeId)
return retire
}
func (cls *Cluster) NotifyAllService(event event.IEvent){
cls.rpcEventLocker.Lock()
defer cls.rpcEventLocker.Unlock()
for serviceName, _ := range cls.mapServiceListenRpcEvent {
ser := service.GetService(serviceName)
if ser == nil {
log.SError("cannot find service name ", serviceName)
log.Error("cannot find service name "+serviceName)
continue
}
var eventData service.RpcConnEvent
eventData.IsConnect = bConnect
eventData.NodeId = nodeId
ser.(service.IModule).NotifyEvent(&eventData)
ser.(service.IModule).NotifyEvent(event)
}
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) TriggerDiscoveryEvent(bDiscovery bool, nodeId string, serviceName []string) {
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
cls.NotifyAllService(&eventData)
}
func (cls *Cluster) GetLocalNodeInfo() *NodeInfo {
@@ -399,15 +380,7 @@ func (cls *Cluster) UnRegRpcEvent(serviceName string) {
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) FetchAllNodeId(fetchFun func(nodeId int)) {
cls.locker.Lock()
for nodeId, _ := range cls.mapIdNode {
fetchFun(nodeId)
}
cls.locker.Unlock()
}
func HasService(nodeId int, serviceName string) bool {
func HasService(nodeId string, serviceName string) bool {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
@@ -420,6 +393,98 @@ func HasService(nodeId int, serviceName string) bool {
return false
}
func GetNodeByServiceName(serviceName string) map[string]struct{} {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
mapNode, ok := cluster.mapServiceNode[serviceName]
if ok == false {
return nil
}
mapNodeId := map[string]struct{}{}
for nodeId,_ := range mapNode {
mapNodeId[nodeId] = struct{}{}
}
return mapNodeId
}
// GetNodeByTemplateServiceName resolves real service names from a template service name and returns map[realServiceName]NodeId
func GetNodeByTemplateServiceName(templateServiceName string) map[string]string {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
mapServiceName := cluster.mapTemplateServiceNode[templateServiceName]
mapNodeId := make(map[string]string,9)
for serviceName := range mapServiceName {
mapNode, ok := cluster.mapServiceNode[serviceName]
if ok == false {
return nil
}
for nodeId,_ := range mapNode {
mapNodeId[serviceName] = nodeId
}
}
return mapNodeId
}
func (cls *Cluster) GetGlobalCfg() interface{} {
return cls.globalCfg
}
func (cls *Cluster) ParseGlobalCfg(cfg interface{}) error{
if cls.globalCfg == nil {
return errors.New("no service configuration found")
}
rv := reflect.ValueOf(cls.globalCfg)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
return errors.New("no service configuration found")
}
bytes,err := json.Marshal(cls.globalCfg)
if err != nil {
return err
}
return json.Unmarshal(bytes,cfg)
}
func (cls *Cluster) GetNodeInfo(nodeId string) (NodeInfo,bool) {
cls.locker.RLock()
defer cls.locker.RUnlock()
nodeInfo,ok:= cls.mapRpc[nodeId]
if ok == false || nodeInfo == nil {
return NodeInfo{},false
}
return nodeInfo.nodeInfo,true
}
func (dc *Cluster) CanDiscoveryService(fromMasterNodeId string,serviceName string) bool{
canDiscovery := true
for i:=0;i<len(dc.GetLocalNodeInfo().DiscoveryService);i++{
masterNodeId := dc.GetLocalNodeInfo().DiscoveryService[i].MasterNodeId
//skip invalid configuration entries
if masterNodeId == rpc.NodeIdNull && len(dc.GetLocalNodeInfo().DiscoveryService[i].ServiceList)==0 {
continue
}
canDiscovery = false
if masterNodeId == fromMasterNodeId || masterNodeId == rpc.NodeIdNull {
for _,discoveryService := range dc.GetLocalNodeInfo().DiscoveryService[i].ServiceList {
if discoveryService == serviceName {
return true
}
}
}
}
return canDiscovery
}
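
The commit "Add service and Global config interfaces, supporting parsing into structs" corresponds to ParseGlobalCfg above, which round-trips the stored Global section through JSON into a caller-supplied struct. A hedged usage sketch: only GetCluster and ParseGlobalCfg come from this diff (assuming the package-level GetCluster accessor is exported, as it is used elsewhere in the diff); the GlobalCfg fields are invented for illustration.

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/cluster"
)

// GlobalCfg mirrors whatever the deployment places under the Global config
// section; these two fields are purely illustrative.
type GlobalCfg struct {
	Env      string `json:"Env"`
	LogLevel string `json:"LogLevel"`
}

func main() {
	var cfg GlobalCfg
	// ParseGlobalCfg marshals the stored Global config to JSON and unmarshals
	// it into the caller's struct, as implemented in the diff above.
	if err := cluster.GetCluster().ParseGlobalCfg(&cfg); err != nil {
		fmt.Println("parse global cfg:", err)
		return
	}
	fmt.Println("env:", cfg.Env, "log level:", cfg.LogLevel)
}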


@@ -1,19 +1,21 @@
package cluster
import "github.com/duanhf2012/origin/v2/rpc"
type ConfigDiscovery struct {
funDelService FunDelNode
funSetService FunSetNodeInfo
localNodeId int
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
}
func (discovery *ConfigDiscovery) InitDiscovery(localNodeId int,funDelNode FunDelNode,funSetNodeInfo FunSetNodeInfo) error{
func (discovery *ConfigDiscovery) InitDiscovery(localNodeId string,funDelNode FunDelNode,funSetNode FunSetNode) error{
discovery.localNodeId = localNodeId
discovery.funDelService = funDelNode
discovery.funSetService = funSetNodeInfo
discovery.funDelNode = funDelNode
discovery.funSetNode = funSetNode
//parse the other local service configuration
_,nodeInfoList,err := GetCluster().readLocalClusterConfig(0)
_,nodeInfoList,_,err := GetCluster().readLocalClusterConfig(rpc.NodeIdNull)
if err != nil {
return err
}
@@ -23,12 +25,10 @@ func (discovery *ConfigDiscovery) InitDiscovery(localNodeId int,funDelNode FunDe
continue
}
discovery.funSetService(&nodeInfo)
discovery.funSetNode(&nodeInfo)
}
return nil
}
func (discovery *ConfigDiscovery) OnNodeStop(){
}

cluster/discovery.go (new file, 71 lines)

@@ -0,0 +1,71 @@
package cluster
import (
"errors"
"github.com/duanhf2012/origin/v2/service"
)
func (cls *Cluster) setupDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
if cls.discoveryInfo.getDiscoveryType() == OriginType { //origin-style service discovery
return cls.setupOriginDiscovery(localNodeId,setupServiceFun)
}else if cls.discoveryInfo.getDiscoveryType() == EtcdType{//etcd-style service discovery
return cls.setupEtcdDiscovery(localNodeId,setupServiceFun)
}
return cls.setupConfigDiscovery(localNodeId,setupServiceFun)
}
func (cls *Cluster) setupOriginDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
//1. if no DiscoveryNode config is present, fall back to discovery via the default config file
localMaster, hasMaster := cls.checkOriginDiscovery(localNodeId)
if hasMaster == false {
return errors.New("no master node config")
}
cls.serviceDiscovery = getOriginDiscovery()
//2. for dynamic service discovery, install the local discovery service
if localMaster == true {
setupServiceFun(&masterService)
cls.AddDiscoveryService(OriginDiscoveryMasterName, false)
}
setupServiceFun(&clientService)
cls.AddDiscoveryService(OriginDiscoveryClientName, true)
return nil
}
func (cls *Cluster) setupEtcdDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
//setup etcd service
cls.serviceDiscovery = getEtcdDiscovery()
setupServiceFun(cls.serviceDiscovery.(service.IService))
cls.AddDiscoveryService(cls.serviceDiscovery.(service.IService).GetName(),false)
return nil
}
func (cls *Cluster) setupConfigDiscovery(localNodeId string, setupServiceFun SetupServiceFun) error{
if cls.serviceDiscovery != nil {
return errors.New("service discovery has been setup")
}
cls.serviceDiscovery = &ConfigDiscovery{}
return nil
}
func (cls *Cluster) GetOriginDiscovery() *OriginDiscovery {
return cls.discoveryInfo.Origin
}
func (cls *Cluster) GetEtcdDiscovery() *EtcdDiscovery {
return cls.discoveryInfo.Etcd
}
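
All three branches above install an implementation into cls.serviceDiscovery. The IServiceDiscovery definition itself is not part of this diff, so the following is a hedged reconstruction of the shape ConfigDiscovery and EtcdDiscoveryService appear to satisfy; the callback signatures are inferred from how funDelNode and funSetNode are invoked, and the real interface may declare additional methods.

package cluster

// FunDelNode and FunSetNode are the callbacks the Cluster hands to a discovery
// implementation so it can remove or upsert nodes as membership changes
// (signatures inferred, not copied from the repository).
type FunDelNode func(nodeId string)
type FunSetNode func(nodeInfo *NodeInfo)

// IServiceDiscovery is the plug-in point selected in setupDiscovery above
// (reconstructed for illustration only).
type IServiceDiscovery interface {
	InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error
	OnNodeStop()
}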


@@ -1,397 +0,0 @@
package cluster
import (
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
)
const DynamicDiscoveryMasterName = "DiscoveryMaster"
const DynamicDiscoveryClientName = "DiscoveryClient"
const RegServiceDiscover = DynamicDiscoveryMasterName + ".RPC_RegServiceDiscover"
const SubServiceDiscover = DynamicDiscoveryClientName + ".RPC_SubServiceDiscover"
const AddSubServiceDiscover = DynamicDiscoveryMasterName + ".RPC_AddSubServiceDiscover"
type DynamicDiscoveryMaster struct {
service.Service
mapNodeInfo map[int32]struct{}
nodeInfo []*rpc.NodeInfo
}
type DynamicDiscoveryClient struct {
service.Service
funDelService FunDelNode
funSetService FunSetNodeInfo
localNodeId int
mapDiscovery map[int32]map[int32]struct{} //map[masterNodeId]map[nodeId]struct{}
}
var masterService DynamicDiscoveryMaster
var clientService DynamicDiscoveryClient
func getDynamicDiscovery() IServiceDiscovery {
return &clientService
}
func init() {
masterService.SetName(DynamicDiscoveryMasterName)
clientService.SetName(DynamicDiscoveryClientName)
}
func (ds *DynamicDiscoveryMaster) isRegNode(nodeId int32) bool {
_, ok := ds.mapNodeInfo[nodeId]
return ok
}
func (ds *DynamicDiscoveryMaster) addNodeInfo(nodeInfo *rpc.NodeInfo) {
if len(nodeInfo.PublicServiceList) == 0 {
return
}
_, ok := ds.mapNodeInfo[nodeInfo.NodeId]
if ok == true {
return
}
ds.mapNodeInfo[nodeInfo.NodeId] = struct{}{}
ds.nodeInfo = append(ds.nodeInfo, nodeInfo)
}
func (ds *DynamicDiscoveryMaster) OnInit() error {
ds.mapNodeInfo = make(map[int32]struct{}, 20)
ds.RegRpcListener(ds)
return nil
}
func (ds *DynamicDiscoveryMaster) OnStart() {
var nodeInfo rpc.NodeInfo
localNodeInfo := cluster.GetLocalNodeInfo()
if localNodeInfo.Private == true {
return
}
nodeInfo.NodeId = int32(localNodeInfo.NodeId)
nodeInfo.NodeName = localNodeInfo.NodeName
nodeInfo.ListenAddr = localNodeInfo.ListenAddr
nodeInfo.PublicServiceList = localNodeInfo.PublicServiceList
nodeInfo.MaxRpcParamLen = localNodeInfo.MaxRpcParamLen
ds.addNodeInfo(&nodeInfo)
}
func (ds *DynamicDiscoveryMaster) OnNodeConnected(nodeId int) {
//do not notify nodes that never registered
if ds.isRegNode(int32(nodeId)) == false {
return
}
//publish the full service list to it
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.IsFull = true
notifyDiscover.NodeInfo = ds.nodeInfo
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
ds.GoNode(nodeId, SubServiceDiscover, &notifyDiscover)
}
func (ds *DynamicDiscoveryMaster) OnNodeDisconnect(nodeId int) {
if ds.isRegNode(int32(nodeId)) == false {
return
}
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
notifyDiscover.DelNodeId = int32(nodeId)
//delete the node
cluster.DelNode(nodeId, true)
//do not broadcast for unregistered nodes, so that disconnects outside the current Master network do not notify this network
ds.CastGo(SubServiceDiscover, &notifyDiscover)
}
func (ds *DynamicDiscoveryMaster) RpcCastGo(serviceMethod string, args interface{}) {
for nodeId, _ := range ds.mapNodeInfo {
ds.GoNode(int(nodeId), serviceMethod, args)
}
}
// a node has registered with us
func (ds *DynamicDiscoveryMaster) RPC_RegServiceDiscover(req *rpc.ServiceDiscoverReq, res *rpc.Empty) error {
if req.NodeInfo == nil {
err := errors.New("RPC_RegServiceDiscover req is error.")
log.SError(err.Error())
return err
}
//broadcast to all other nodes
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
notifyDiscover.NodeInfo = append(notifyDiscover.NodeInfo, req.NodeInfo)
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
//store locally
ds.addNodeInfo(req.NodeInfo)
//initialize the node info
var nodeInfo NodeInfo
nodeInfo.NodeId = int(req.NodeInfo.NodeId)
nodeInfo.NodeName = req.NodeInfo.NodeName
nodeInfo.Private = req.NodeInfo.Private
nodeInfo.ServiceList = req.NodeInfo.PublicServiceList
nodeInfo.PublicServiceList = req.NodeInfo.PublicServiceList
nodeInfo.ListenAddr = req.NodeInfo.ListenAddr
nodeInfo.MaxRpcParamLen = req.NodeInfo.MaxRpcParamLen
//proactively delete an existing node to ensure it is disconnected before reconnecting
cluster.serviceDiscoveryDelNode(nodeInfo.NodeId, true)
//add to the local Cluster module, which will connect to the node
cluster.serviceDiscoverySetNodeInfo(&nodeInfo)
return nil
}
func (dc *DynamicDiscoveryClient) OnInit() error {
dc.RegRpcListener(dc)
dc.mapDiscovery = map[int32]map[int32]struct{}{}
return nil
}
func (dc *DynamicDiscoveryClient) addMasterNode(masterNodeId int32, nodeId int32) {
_, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
dc.mapDiscovery[masterNodeId] = map[int32]struct{}{}
}
dc.mapDiscovery[masterNodeId][nodeId] = struct{}{}
}
func (dc *DynamicDiscoveryClient) removeMasterNode(masterNodeId int32, nodeId int32) {
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return
}
delete(mapNodeId, nodeId)
}
func (dc *DynamicDiscoveryClient) findNodeId(nodeId int32) bool {
for _, mapNodeId := range dc.mapDiscovery {
_, ok := mapNodeId[nodeId]
if ok == true {
return true
}
}
return false
}
func (dc *DynamicDiscoveryClient) OnStart() {
//2. add and connect to the discovery master nodes
dc.addDiscoveryMaster()
}
func (dc *DynamicDiscoveryClient) addDiscoveryMaster() {
discoveryNodeList := cluster.GetDiscoveryNodeList()
for i := 0; i < len(discoveryNodeList); i++ {
if discoveryNodeList[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
dc.funSetService(&discoveryNodeList[i])
}
}
func (dc *DynamicDiscoveryClient) fullCompareDiffNode(masterNodeId int32, mapNodeInfo map[int32]*rpc.NodeInfo) []int32 {
if mapNodeInfo == nil {
return nil
}
diffNodeIdSlice := make([]int32, 0, len(mapNodeInfo))
mapNodeId := map[int32]struct{}{}
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return nil
}
//nodes not present under any local Master are collected into diffNodeIdSlice
for nodeId, _ := range mapNodeId {
_, ok := mapNodeInfo[nodeId]
if ok == false {
diffNodeIdSlice = append(diffNodeIdSlice, nodeId)
}
}
return diffNodeIdSlice
}
//notification from the subscribed service discovery
func (dc *DynamicDiscoveryClient) RPC_SubServiceDiscover(req *rpc.SubscribeDiscoverNotify) error {
//collect the NeighborService entries to filter for the current master node
masterDiscoveryNodeInfo := cluster.GetMasterDiscoveryNodeInfo(int(req.MasterNodeId))
mapMasterDiscoveryService := map[string]struct{}{}
if masterDiscoveryNodeInfo != nil {
for i := 0; i < len(masterDiscoveryNodeInfo.NeighborService); i++ {
mapMasterDiscoveryService[masterDiscoveryNodeInfo.NeighborService[i]] = struct{}{}
}
}
mapNodeInfo := map[int32]*rpc.NodeInfo{}
for _, nodeInfo := range req.NodeInfo {
//skip the local node and nodes without any public services
if int(nodeInfo.NodeId) == dc.localNodeId {
continue
}
if cluster.IsMasterDiscoveryNode() == false && len(nodeInfo.PublicServiceList) == 1 &&
nodeInfo.PublicServiceList[0] == DynamicDiscoveryClientName {
continue
}
//iterate over all public services and filter them
for _, serviceName := range nodeInfo.PublicServiceList {
//only filter when configuration exists
if len(mapMasterDiscoveryService) > 0 {
if _, ok := mapMasterDiscoveryService[serviceName]; ok == false {
continue
}
}
nInfo := mapNodeInfo[nodeInfo.NodeId]
if nInfo == nil {
nInfo = &rpc.NodeInfo{}
nInfo.NodeId = nodeInfo.NodeId
nInfo.NodeName = nodeInfo.NodeName
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
mapNodeInfo[nodeInfo.NodeId] = nInfo
}
nInfo.PublicServiceList = append(nInfo.PublicServiceList, serviceName)
}
}
//for a full sync, find the nodes that differ
var willDelNodeId []int32
//filter if this is not a neighbor node
if req.IsFull == true {
diffNode := dc.fullCompareDiffNode(req.MasterNodeId, mapNodeInfo)
if len(diffNode) > 0 {
willDelNodeId = append(willDelNodeId, diffNode...)
}
}
//delete the explicitly specified node
if req.DelNodeId > 0 && req.DelNodeId != int32(dc.localNodeId) {
willDelNodeId = append(willDelNodeId, req.DelNodeId)
}
//delete nodes that are no longer needed
for _, nodeId := range willDelNodeId {
dc.removeMasterNode(req.MasterNodeId, int32(nodeId))
if dc.findNodeId(nodeId) == false {
dc.funDelService(int(nodeId), false)
}
}
//set the new nodes
for _, nodeInfo := range mapNodeInfo {
dc.addMasterNode(req.MasterNodeId, nodeInfo.NodeId)
dc.setNodeInfo(nodeInfo)
}
return nil
}
func (dc *DynamicDiscoveryClient) isDiscoverNode(nodeId int) bool {
for i := 0; i < len(cluster.masterDiscoveryNodeList); i++ {
if cluster.masterDiscoveryNodeList[i].NodeId == nodeId {
return true
}
}
return false
}
func (dc *DynamicDiscoveryClient) OnNodeConnected(nodeId int) {
nodeInfo := cluster.GetMasterDiscoveryNodeInfo(nodeId)
if nodeInfo == nil {
return
}
var req rpc.ServiceDiscoverReq
req.NodeInfo = &rpc.NodeInfo{}
req.NodeInfo.NodeId = int32(cluster.localNodeInfo.NodeId)
req.NodeInfo.NodeName = cluster.localNodeInfo.NodeName
req.NodeInfo.ListenAddr = cluster.localNodeInfo.ListenAddr
req.NodeInfo.MaxRpcParamLen = cluster.localNodeInfo.MaxRpcParamLen
//if no NeighborService is configured for the MasterDiscoveryNode, sync all services of this node
if len(nodeInfo.NeighborService) == 0 {
req.NodeInfo.PublicServiceList = cluster.localNodeInfo.PublicServiceList
} else {
req.NodeInfo.PublicServiceList = append(req.NodeInfo.PublicServiceList, DynamicDiscoveryClientName)
}
//sync this Node's service info to the Master service
err := dc.AsyncCallNode(nodeId, RegServiceDiscover, &req, func(res *rpc.Empty, err error) {
if err != nil {
log.SError("call ", RegServiceDiscover, " is fail :", err.Error())
return
}
})
if err != nil {
log.SError("call ", RegServiceDiscover, " is fail :", err.Error())
}
}
func (dc *DynamicDiscoveryClient) setNodeInfo(nodeInfo *rpc.NodeInfo) {
if nodeInfo == nil || nodeInfo.Private == true || int(nodeInfo.NodeId) == dc.localNodeId {
return
}
//filter the services of interest
localNodeInfo := cluster.GetLocalNodeInfo()
if len(localNodeInfo.DiscoveryService) > 0 {
var discoverServiceSlice = make([]string, 0, 24)
for _, pubService := range nodeInfo.PublicServiceList {
for _, discoverService := range localNodeInfo.DiscoveryService {
if pubService == discoverService {
discoverServiceSlice = append(discoverServiceSlice, pubService)
}
}
}
nodeInfo.PublicServiceList = discoverServiceSlice
}
if len(nodeInfo.PublicServiceList) == 0 {
return
}
var nInfo NodeInfo
nInfo.ServiceList = nodeInfo.PublicServiceList
nInfo.PublicServiceList = nodeInfo.PublicServiceList
nInfo.NodeId = int(nodeInfo.NodeId)
nInfo.NodeName = nodeInfo.NodeName
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
dc.funSetService(&nInfo)
}
func (dc *DynamicDiscoveryClient) OnNodeDisconnect(nodeId int) {
//clean up Discard nodes
cluster.DiscardNode(nodeId)
}
func (dc *DynamicDiscoveryClient) InitDiscovery(localNodeId int, funDelNode FunDelNode, funSetNodeInfo FunSetNodeInfo) error {
dc.localNodeId = localNodeId
dc.funDelService = funDelNode
dc.funSetService = funSetNodeInfo
return nil
}
func (dc *DynamicDiscoveryClient) OnNodeStop() {
}

cluster/etcddiscovery.go (new file, 521 lines)

@@ -0,0 +1,521 @@
package cluster
import (
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/timer"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
"google.golang.org/protobuf/proto"
"time"
"context"
"errors"
"fmt"
"go.uber.org/zap"
"path"
"runtime"
"strings"
"sync/atomic"
)
const originDir = "/origin"
const testKey = originDir+"/_inner/_test_7501f3ed-b716-44c2-0090-fc1ed0166d7a"
type etcdClientInfo struct {
watchKeys []string
leaseID clientv3.LeaseID
keepAliveChan <-chan *clientv3.LeaseKeepAliveResponse
}
type EtcdDiscoveryService struct {
service.Service
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
byteLocalNodeInfo string
mapClient map[*clientv3.Client]*etcdClientInfo
isClose int32
bRetire bool
mapDiscoveryNodeId map[string]map[string]struct{} //map[networkName]map[nodeId]
}
func getEtcdDiscovery() (IServiceDiscovery) {
etcdDiscovery := &EtcdDiscoveryService{}
return etcdDiscovery
}
func (ed *EtcdDiscoveryService) InitDiscovery(localNodeId string,funDelNode FunDelNode,funSetNode FunSetNode) error {
ed.localNodeId = localNodeId
ed.funDelNode = funDelNode
ed.funSetNode = funSetNode
return nil
}
const(
eeGets = 0
eePut = 1
eeDelete = 2
)
type etcdDiscoveryEvent struct {
typ int
watchKey string
Kvs []*mvccpb.KeyValue
}
func (ee *etcdDiscoveryEvent) GetEventType() event.EventType{
return event.Sys_Event_EtcdDiscovery
}
func (ed *EtcdDiscoveryService) OnInit() error {
ed.mapClient = make(map[*clientv3.Client]*etcdClientInfo,1)
ed.mapDiscoveryNodeId = make(map[string]map[string]struct{})
ed.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_EtcdDiscovery, ed.GetEventHandler(), ed.OnEtcdDiscovery)
err := ed.marshalNodeInfo()
if err != nil {
return err
}
etcdDiscoveryCfg := cluster.GetEtcdDiscovery()
if etcdDiscoveryCfg == nil {
return errors.New("etcd discovery config is nil.")
}
for i:=0;i<len(etcdDiscoveryCfg.EtcdList);i++{
client, cerr := clientv3.New(clientv3.Config{
Endpoints: etcdDiscoveryCfg.EtcdList[i].Endpoints,
DialTimeout: etcdDiscoveryCfg.DialTimeoutMillisecond,
Logger: zap.NewNop(),
})
if cerr != nil {
log.Error("etcd discovery init fail",log.ErrorAttr("err",cerr))
return cerr
}
ctx,_:=context.WithTimeout(context.Background(),time.Second*3)
_,err = client.Leases(ctx)
if err != nil {
log.Error("etcd discovery init fail",log.Any("endpoint",etcdDiscoveryCfg.EtcdList[i].Endpoints),log.ErrorAttr("err",err))
return err
}
ec := &etcdClientInfo{}
for _, networkName := range etcdDiscoveryCfg.EtcdList[i].NetworkName {
ec.watchKeys = append(ec.watchKeys,fmt.Sprintf("%s/%s",originDir,networkName))
}
ed.mapClient[client] = ec
}
return nil
}
func (ed *EtcdDiscoveryService) getRegisterKey(watchkey string) string{
return watchkey+"/"+ed.localNodeId
}
func (ed *EtcdDiscoveryService) registerServiceByClient(client *clientv3.Client,etcdClient *etcdClientInfo) {
// create a lease
var err error
var resp *clientv3.LeaseGrantResponse
resp, err = client.Grant(context.Background(), cluster.GetEtcdDiscovery().TTLSecond)
if err != nil {
log.Error("etcd registerService fail",log.ErrorAttr("err",err))
ed.tryRegisterService(client,etcdClient)
return
}
etcdClient.leaseID = resp.ID
for _,watchKey:= range etcdClient.watchKeys {
// register the service node in etcd
_, err = client.Put(context.Background(), ed.getRegisterKey(watchKey), ed.byteLocalNodeInfo, clientv3.WithLease(resp.ID))
if err != nil {
log.Error("etcd Put fail",log.ErrorAttr("err",err))
ed.tryRegisterService(client,etcdClient)
return
}
}
etcdClient.keepAliveChan,err = client.KeepAlive(context.Background(), etcdClient.leaseID)
if err != nil {
log.Error("etcd KeepAlive fail",log.ErrorAttr("err",err))
ed.tryRegisterService(client,etcdClient)
return
}
go func() {
for {
select {
case _, ok := <-etcdClient.keepAliveChan:
//log.Debug("ok",log.Any("addr",client.Endpoints()))
if !ok {
log.Error("etcd keepAliveChan fail",log.Any("watchKeys",etcdClient.watchKeys))
ed.tryRegisterService(client,etcdClient)
return
}
}
}
}()
}
func (ed *EtcdDiscoveryService) tryRegisterService(client *clientv3.Client,etcdClient *etcdClientInfo){
if ed.isStop() {
return
}
ed.AfterFunc(time.Second*3, func(t *timer.Timer) {
ed.registerServiceByClient(client,etcdClient)
})
}
func (ed *EtcdDiscoveryService) tryWatch(client *clientv3.Client,etcdClient *etcdClientInfo) {
if ed.isStop() {
return
}
ed.AfterFunc(time.Second*3, func(t *timer.Timer) {
ed.watchByClient(client,etcdClient)
})
}
func (ed *EtcdDiscoveryService) tryLaterRetire() {
ed.AfterFunc(time.Second, func(*timer.Timer) {
if ed.retire() != nil {
ed.tryLaterRetire()
}
})
}
func (ed *EtcdDiscoveryService) retire() error{
//update the record in etcd
for c,ec := range ed.mapClient {
for _, watchKey := range ec.watchKeys {
// register the service node in etcd
_, err := c.Put(context.Background(), ed.getRegisterKey(watchKey), ed.byteLocalNodeInfo, clientv3.WithLease(ec.leaseID))
if err != nil {
log.Error("etcd Put fail", log.ErrorAttr("err", err))
return err
}
}
}
return nil
}
func (ed *EtcdDiscoveryService) OnRetire(){
ed.bRetire = true
ed.marshalNodeInfo()
if ed.retire()!= nil {
ed.tryLaterRetire()
}
}
func (ed *EtcdDiscoveryService) OnRelease(){
atomic.StoreInt32(&ed.isClose,1)
ed.close()
}
func (ed *EtcdDiscoveryService) isStop() bool{
return atomic.LoadInt32(&ed.isClose) == 1
}
func (nd *EtcdDiscoveryService) OnStart() {
for c, ec := range nd.mapClient {
nd.tryRegisterService(c,ec)
nd.tryWatch(c,ec)
}
}
func (ed *EtcdDiscoveryService) marshalNodeInfo() error{
nInfo := cluster.GetLocalNodeInfo()
var nodeInfo rpc.NodeInfo
nodeInfo.NodeId = nInfo.NodeId
nodeInfo.ListenAddr = nInfo.ListenAddr
nodeInfo.Retire = ed.bRetire
nodeInfo.PublicServiceList = nInfo.PublicServiceList
nodeInfo.MaxRpcParamLen = nInfo.MaxRpcParamLen
byteLocalNodeInfo,err := proto.Marshal(&nodeInfo)
if err ==nil{
ed.byteLocalNodeInfo = string(byteLocalNodeInfo)
}
return err
}
func (ed *EtcdDiscoveryService) setNodeInfo(networkName string,nodeInfo *rpc.NodeInfo) bool{
if nodeInfo == nil || nodeInfo.Private == true || nodeInfo.NodeId == ed.localNodeId {
return false
}
//filter the services of interest
var discoverServiceSlice = make([]string, 0, 24)
for _, pubService := range nodeInfo.PublicServiceList {
if cluster.CanDiscoveryService(networkName,pubService) == true {
discoverServiceSlice = append(discoverServiceSlice,pubService)
}
}
if len(discoverServiceSlice) == 0 {
return false
}
var nInfo NodeInfo
nInfo.ServiceList = discoverServiceSlice
nInfo.PublicServiceList = discoverServiceSlice
nInfo.NodeId = nodeInfo.NodeId
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
nInfo.Retire = nodeInfo.Retire
nInfo.Private = nodeInfo.Private
ed.funSetNode(&nInfo)
return true
}
func (ed *EtcdDiscoveryService) close(){
for c, ec := range ed.mapClient {
if _, err := c.Revoke(context.Background(), ec.leaseID); err != nil {
log.Error("etcd Revoke fail",log.ErrorAttr("err",err))
}
c.Watcher.Close()
err := c.Close()
if err != nil {
log.Error("etcd Close fail",log.ErrorAttr("err",err))
}
}
}
func (ed *EtcdDiscoveryService) getServices(client *clientv3.Client,etcdClient *etcdClientInfo,watchKey string) bool {
// fetch existing keys by prefix
resp, err := client.Get(context.Background(), watchKey, clientv3.WithPrefix())
if err != nil {
log.Error("etcd Get fail", log.ErrorAttr("err", err))
ed.tryWatch(client, etcdClient)
return false
}
// iterate over the returned keys and values
ed.notifyGets(watchKey,resp.Kvs)
return true
}
func (ed *EtcdDiscoveryService) watchByClient(client *clientv3.Client,etcdClient *etcdClientInfo){
//close all existing watchers first
for _, watchKey := range etcdClient.watchKeys {
// watch the prefix for server changes
go ed.watcher(client,etcdClient, watchKey)
}
}
// watcher watches the key prefix
func (ed *EtcdDiscoveryService) watcher(client *clientv3.Client,etcdClient *etcdClientInfo,watchKey string) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
ed.tryWatch(client,etcdClient)
}
}()
rch := client.Watch(context.Background(), watchKey, clientv3.WithPrefix())
if ed.getServices(client,etcdClient,watchKey) == false {
return
}
for wresp := range rch {
for _, ev := range wresp.Events {
switch ev.Type {
case clientv3.EventTypePut: // modified or added
ed.notifyPut(watchKey,ev.Kv)
case clientv3.EventTypeDelete: // deleted
ed.notifyDelete(watchKey,ev.Kv)
}
}
}
ed.tryWatch(client,etcdClient)
}
func (ed *EtcdDiscoveryService) setNode(netWorkName string,byteNode []byte) string{
var nodeInfo rpc.NodeInfo
err := proto.Unmarshal(byteNode,&nodeInfo)
if err != nil {
log.Error("Unmarshal fail",log.String("netWorkName",netWorkName),log.ErrorAttr("err",err))
return ""
}
ed.setNodeInfo(netWorkName,&nodeInfo)
return nodeInfo.NodeId
}
func (ed *EtcdDiscoveryService) delNode(fullKey string) string{
nodeId := ed.getNodeId(fullKey)
if nodeId == ed.localNodeId {
return ""
}
ed.funDelNode(nodeId)
return nodeId
}
func (ed *EtcdDiscoveryService) getNetworkNameByWatchKey(watchKey string) string{
return watchKey[strings.LastIndex(watchKey,"/")+1:]
}
func (ed *EtcdDiscoveryService) getNetworkNameByFullKey(fullKey string) string{
return fullKey[len(originDir)+1:strings.LastIndex(fullKey,"/")]
}
func (ed *EtcdDiscoveryService) getNodeId(fullKey string) string{
return fullKey[strings.LastIndex(fullKey,"/")+1:]
}
func (ed *EtcdDiscoveryService) OnEtcdDiscovery(ev event.IEvent){
disEvent := ev.(*etcdDiscoveryEvent)
switch disEvent.typ {
case eeGets:
ed.OnEventGets(disEvent.watchKey,disEvent.Kvs)
case eePut:
if len(disEvent.Kvs) == 1 {
ed.OnEventPut(disEvent.watchKey,disEvent.Kvs[0])
}
case eeDelete:
if len(disEvent.Kvs) == 1 {
ed.OnEventDelete(disEvent.watchKey,disEvent.Kvs[0])
}
}
}
func (ed *EtcdDiscoveryService) notifyGets(watchKey string,Kvs []*mvccpb.KeyValue) {
var ev etcdDiscoveryEvent
ev.typ = eeGets
ev.watchKey = watchKey
ev.Kvs = Kvs
ed.NotifyEvent(&ev)
}
func (ed *EtcdDiscoveryService) notifyPut(watchKey string,Kvs *mvccpb.KeyValue) {
var ev etcdDiscoveryEvent
ev.typ = eePut
ev.watchKey = watchKey
ev.Kvs = append(ev.Kvs,Kvs)
ed.NotifyEvent(&ev)
}
func (ed *EtcdDiscoveryService) notifyDelete(watchKey string,Kvs *mvccpb.KeyValue) {
var ev etcdDiscoveryEvent
ev.typ = eeDelete
ev.watchKey = watchKey
ev.Kvs = append(ev.Kvs,Kvs)
ed.NotifyEvent(&ev)
}
func (ed *EtcdDiscoveryService) OnEventGets(watchKey string,Kvs []*mvccpb.KeyValue) {
mapNode := make(map[string]struct{},32)
for _, kv := range Kvs {
nodeId := ed.setNode(ed.getNetworkNameByFullKey(string(kv.Key)), kv.Value)
mapNode[nodeId] = struct{}{}
ed.addNodeId(watchKey,nodeId)
}
// This block removes expired nodes:
// for every node ID associated with watchKey in mapDiscoveryNodeId, traverse the set;
// if a node ID is missing from mapNode and is not the local node ID, call funDelNode to remove it.
mapLastNodeId := ed.mapDiscoveryNodeId[watchKey] // node ID set recorded for this watchKey
for nodeId := range mapLastNodeId { // iterate over all recorded node IDs
if _,ok := mapNode[nodeId];ok == false && nodeId != ed.localNodeId { // missing from mapNode and not the local node
ed.funDelNode(nodeId) // remove the node
delete(ed.mapDiscoveryNodeId[watchKey],nodeId)
}
}
}
func (ed *EtcdDiscoveryService) OnEventPut(watchKey string,Kv *mvccpb.KeyValue) {
nodeId := ed.setNode(ed.getNetworkNameByFullKey(string(Kv.Key)), Kv.Value)
ed.addNodeId(watchKey,nodeId)
}
func (ed *EtcdDiscoveryService) OnEventDelete(watchKey string,Kv *mvccpb.KeyValue) {
nodeId := ed.delNode(string(Kv.Key))
delete(ed.mapDiscoveryNodeId[watchKey],nodeId)
}
func (ed *EtcdDiscoveryService) addNodeId(watchKey string,nodeId string) {
if _,ok := ed.mapDiscoveryNodeId[watchKey];ok == false {
ed.mapDiscoveryNodeId[watchKey] = make(map[string]struct{})
}
ed.mapDiscoveryNodeId[watchKey][nodeId] = struct{}{}
}
func (ed *EtcdDiscoveryService) OnNodeDisconnect(nodeId string) {
//clean up Discard nodes
cluster.DiscardNode(nodeId)
}
func (ed *EtcdDiscoveryService) RPC_ServiceRecord(etcdServiceRecord *service.EtcdServiceRecordEvent,empty *service.Empty) error{
var client *clientv3.Client
//write to etcd
for c, info := range ed.mapClient{
for _,watchKey := range info.watchKeys {
if ed.getNetworkNameByWatchKey(watchKey) == etcdServiceRecord.NetworkName {
client = c
break
}
}
}
if client == nil {
log.Error("etcd record fail,cannot find network name",log.String("networkName",etcdServiceRecord.NetworkName))
return errors.New("annot find network name")
}
var lg *clientv3.LeaseGrantResponse
var err error
if etcdServiceRecord.TTLSecond > 0 {
ctx,_:=context.WithTimeout(context.Background(),time.Second*3)
lg, err = client.Grant(ctx, etcdServiceRecord.TTLSecond)
if err != nil {
log.Error("etcd record fail,cannot grant lease",log.ErrorAttr("err",err))
return errors.New("cannot grant lease")
}
}
if lg != nil {
ctx,_:=context.WithTimeout(context.Background(),time.Second*3)
_, err = client.Put(ctx, path.Join(originDir,etcdServiceRecord.RecordKey),etcdServiceRecord.RecordInfo, clientv3.WithLease(lg.ID))
if err != nil {
log.Error("etcd record fail,cannot put record",log.ErrorAttr("err",err))
}
return errors.New("cannot put record")
}
_,err = client.Put(context.Background(), path.Join(originDir,etcdServiceRecord.RecordKey),etcdServiceRecord.RecordInfo)
if err != nil {
log.Error("etcd record fail,cannot put record",log.ErrorAttr("err",err))
return errors.New("cannot put record")
}
return nil
}
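
The registration path in EtcdDiscoveryService above is the standard etcd lease pattern: grant a lease, Put the node record with WithLease, then keep the lease alive so the key disappears automatically when the node stops renewing. A minimal standalone sketch of that pattern, assuming a local etcd endpoint and an invented key and value:

package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd endpoint
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a lease and attach the node record to it; if keep-alives stop,
	// the key expires and watchers receive a delete event for this node.
	lease, err := cli.Grant(context.Background(), 10) // TTL in seconds
	if err != nil {
		log.Fatal(err)
	}
	if _, err = cli.Put(context.Background(), "/origin/demo/node-1", "serialized node info",
		clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}
	ch, err := cli.KeepAlive(context.Background(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	for range ch {
		// drain keep-alive responses; the loop ends when the channel closes
	}
}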

cluster/nodettl.go (new file, 71 lines)

@@ -0,0 +1,71 @@
package cluster
import (
"time"
"container/list"
)
type nodeTTL struct {
nodeId string
refreshTime time.Time
}
type nodeSetTTL struct {
l *list.List
mapElement map[string]*list.Element
ttl time.Duration
}
func (ns *nodeSetTTL) init(ttl time.Duration) {
ns.ttl = ttl
ns.mapElement = make(map[string]*list.Element,32)
ns.l = list.New()
}
func (ns *nodeSetTTL) removeNode(nodeId string) {
ele,ok:=ns.mapElement[nodeId]
if ok == false {
return
}
ns.l.Remove(ele)
delete(ns.mapElement,nodeId)
}
func (ns *nodeSetTTL) addAndRefreshNode(nodeId string){
ele,ok:=ns.mapElement[nodeId]
if ok == false {
ele = ns.l.PushBack(nodeId)
ele.Value = &nodeTTL{nodeId,time.Now()}
ns.mapElement[nodeId] = ele
return
}
ele.Value.(*nodeTTL).refreshTime = time.Now()
ns.l.MoveToBack(ele)
}
func (ns *nodeSetTTL) checkTTL(cb func(nodeIdList []string)){
nodeIdList := []string{}
for{
f := ns.l.Front()
if f == nil {
break
}
nt := f.Value.(*nodeTTL)
if time.Now().Sub(nt.refreshTime) > ns.ttl {
nodeIdList = append(nodeIdList,nt.nodeId)
}else{
break
}
//remove the expired node
ns.l.Remove(f)
delete(ns.mapElement,nt.nodeId)
}
if len(nodeIdList) >0 {
cb(nodeIdList)
}
}
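A minimal usage sketch of nodeSetTTL (node IDs are placeholders; the type has no internal locking, so it is assumed to be driven from a single goroutine, as the discovery master does from its ticker):

func exampleNodeSetTTL() {
	var ns nodeSetTTL
	ns.init(9 * time.Second) // nodes not refreshed within 9s count as expired

	// Call on every ping/registration received from a node.
	ns.addAndRefreshNode("node-a")
	ns.addAndRefreshNode("node-b")

	// Call periodically (e.g. from a ticker); expired nodes arrive via the callback.
	ns.checkTTL(func(nodeIdList []string) {
		for _, nodeId := range nodeIdList {
			_ = nodeId // e.g. treat the node as disconnected
		}
	})
}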

cluster/origindiscovery.go (new file)

@@ -0,0 +1,640 @@
package cluster
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/timer"
"google.golang.org/protobuf/proto"
"time"
)
const OriginDiscoveryMasterName = "DiscoveryMaster"
const OriginDiscoveryClientName = "DiscoveryClient"
const RegServiceDiscover = OriginDiscoveryMasterName + ".RPC_RegServiceDiscover"
const SubServiceDiscover = OriginDiscoveryClientName + ".RPC_SubServiceDiscover"
const AddSubServiceDiscover = OriginDiscoveryMasterName + ".RPC_AddSubServiceDiscover"
const NodeRetireRpcMethod = OriginDiscoveryMasterName+".RPC_NodeRetire"
const RpcPingMethod = OriginDiscoveryMasterName+".RPC_Ping"
const UnRegServiceDiscover = OriginDiscoveryMasterName+".RPC_UnRegServiceDiscover"
type OriginDiscoveryMaster struct {
service.Service
mapNodeInfo map[string]struct{}
nodeInfo []*rpc.NodeInfo
nsTTL nodeSetTTL
}
type OriginDiscoveryClient struct {
service.Service
funDelNode FunDelNode
funSetNode FunSetNode
localNodeId string
mapDiscovery map[string]map[string][]string //map[masterNodeId]map[nodeId]struct{}
bRetire bool
isRegisterOk bool
}
var masterService OriginDiscoveryMaster
var clientService OriginDiscoveryClient
func getOriginDiscovery() IServiceDiscovery {
return &clientService
}
func init() {
masterService.SetName(OriginDiscoveryMasterName)
clientService.SetName(OriginDiscoveryClientName)
}
func (ds *OriginDiscoveryMaster) isRegNode(nodeId string) bool {
_, ok := ds.mapNodeInfo[nodeId]
return ok
}
func (ds *OriginDiscoveryMaster) updateNodeInfo(nInfo *rpc.NodeInfo) {
if _,ok:= ds.mapNodeInfo[nInfo.NodeId];ok == false {
return
}
nodeInfo := proto.Clone(nInfo).(*rpc.NodeInfo)
for i:=0;i<len(ds.nodeInfo);i++ {
if ds.nodeInfo[i].NodeId == nodeInfo.NodeId {
ds.nodeInfo[i] = nodeInfo
break
}
}
}
func (ds *OriginDiscoveryMaster) addNodeInfo(nInfo *rpc.NodeInfo) {
if len(nInfo.PublicServiceList) == 0 {
return
}
_, ok := ds.mapNodeInfo[nInfo.NodeId]
if ok == true {
return
}
ds.mapNodeInfo[nInfo.NodeId] = struct{}{}
nodeInfo := proto.Clone(nInfo).(*rpc.NodeInfo)
ds.nodeInfo = append(ds.nodeInfo, nodeInfo)
}
func (ds *OriginDiscoveryMaster) removeNodeInfo(nodeId string) {
if _,ok:= ds.mapNodeInfo[nodeId];ok == false {
return
}
for i:=0;i<len(ds.nodeInfo);i++ {
if ds.nodeInfo[i].NodeId == nodeId {
ds.nodeInfo = append(ds.nodeInfo[:i],ds.nodeInfo[i+1:]...)
break
}
}
ds.nsTTL.removeNode(nodeId)
delete(ds.mapNodeInfo,nodeId)
}
func (ds *OriginDiscoveryMaster) OnInit() error {
ds.mapNodeInfo = make(map[string]struct{}, 20)
ds.RegNodeConnListener(ds)
ds.RegNatsConnListener(ds)
ds.nsTTL.init(time.Duration(cluster.GetOriginDiscovery().TTLSecond)*time.Second)
return nil
}
func (ds *OriginDiscoveryMaster) checkTTL(){
if cluster.IsNatsMode() == false {
return
}
interval := time.Duration(cluster.GetOriginDiscovery().TTLSecond)*time.Second
interval = interval /3 /2
if interval < time.Second {
interval = time.Second
}
ds.NewTicker(interval,func(t *timer.Ticker){
ds.nsTTL.checkTTL(func(nodeIdList []string) {
for _,nodeId := range nodeIdList {
log.Info("TTL expiry",log.String("nodeId",nodeId))
ds.OnNodeDisconnect(nodeId)
}
})
})
}
func (ds *OriginDiscoveryMaster) OnStart() {
var nodeInfo rpc.NodeInfo
localNodeInfo := cluster.GetLocalNodeInfo()
nodeInfo.NodeId = localNodeInfo.NodeId
nodeInfo.ListenAddr = localNodeInfo.ListenAddr
nodeInfo.PublicServiceList = localNodeInfo.PublicServiceList
nodeInfo.MaxRpcParamLen = localNodeInfo.MaxRpcParamLen
nodeInfo.Private = localNodeInfo.Private
nodeInfo.Retire = localNodeInfo.Retire
ds.addNodeInfo(&nodeInfo)
ds.checkTTL()
}
func (dc *OriginDiscoveryMaster) OnNatsConnected(){
//sync the full service discovery info to all nodes
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.IsFull = true
notifyDiscover.NodeInfo = dc.nodeInfo
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
dc.RpcCastGo(SubServiceDiscover, &notifyDiscover)
}
func (dc *OriginDiscoveryMaster) OnNatsDisconnect(){
}
func (ds *OriginDiscoveryMaster) OnNodeConnected(nodeId string) {
}
func (ds *OriginDiscoveryMaster) OnNodeDisconnect(nodeId string) {
if ds.isRegNode(nodeId) == false {
return
}
ds.removeNodeInfo(nodeId)
//proactively delete the existing node so it is disconnected before being reconnected
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
notifyDiscover.DelNodeId = nodeId
//delete the node
cluster.DelNode(nodeId)
//nodes that never registered are not broadcast, so a dropped connection from another master network does not notify this one
ds.CastGo(SubServiceDiscover, &notifyDiscover)
}
func (ds *OriginDiscoveryMaster) RpcCastGo(serviceMethod string, args interface{}) {
for nodeId, _ := range ds.mapNodeInfo {
if nodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
ds.GoNode(nodeId, serviceMethod, args)
}
}
func (ds *OriginDiscoveryMaster) RPC_Ping(req *rpc.Ping, res *rpc.Pong) error {
if ds.isRegNode(req.NodeId) == false{
res.Ok = false
return nil
}
res.Ok = true
ds.nsTTL.addAndRefreshNode(req.NodeId)
return nil
}
func (ds *OriginDiscoveryMaster) RPC_NodeRetire(req *rpc.NodeRetireReq, res *rpc.Empty) error {
log.Info("node is retire",log.String("nodeId",req.NodeInfo.NodeId),log.Bool("retire",req.NodeInfo.Retire))
ds.updateNodeInfo(req.NodeInfo)
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
notifyDiscover.NodeInfo = append(notifyDiscover.NodeInfo, req.NodeInfo)
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
return nil
}
// handle a node registering with this master
func (ds *OriginDiscoveryMaster) RPC_RegServiceDiscover(req *rpc.RegServiceDiscoverReq, res *rpc.SubscribeDiscoverNotify) error {
if req.NodeInfo == nil {
err := errors.New("RPC_RegServiceDiscover req is error.")
log.Error(err.Error())
return err
}
if req.NodeInfo.NodeId != cluster.GetLocalNodeInfo().NodeId {
ds.nsTTL.addAndRefreshNode(req.NodeInfo.NodeId)
}
//broadcast to all other nodes
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
notifyDiscover.NodeInfo = append(notifyDiscover.NodeInfo, req.NodeInfo)
ds.RpcCastGo(SubServiceDiscover, &notifyDiscover)
//store it locally
ds.addNodeInfo(req.NodeInfo)
//initialize the node info
var nodeInfo NodeInfo
nodeInfo.NodeId = req.NodeInfo.NodeId
nodeInfo.Private = req.NodeInfo.Private
nodeInfo.ServiceList = req.NodeInfo.PublicServiceList
nodeInfo.PublicServiceList = req.NodeInfo.PublicServiceList
nodeInfo.ListenAddr = req.NodeInfo.ListenAddr
nodeInfo.MaxRpcParamLen = req.NodeInfo.MaxRpcParamLen
nodeInfo.Retire = req.NodeInfo.Retire
//proactively delete the existing node so it is disconnected before being reconnected
cluster.serviceDiscoveryDelNode(nodeInfo.NodeId)
//add it to the local Cluster module, which will connect to the node
cluster.serviceDiscoverySetNodeInfo(&nodeInfo)
res.IsFull = true
res.NodeInfo = ds.nodeInfo
res.MasterNodeId = cluster.GetLocalNodeInfo().NodeId
return nil
}
func (ds *OriginDiscoveryMaster) RPC_UnRegServiceDiscover(req *rpc.UnRegServiceDiscoverReq, res *rpc.Empty) error {
log.Debug("RPC_UnRegServiceDiscover",log.String("nodeId",req.NodeId))
ds.OnNodeDisconnect(req.NodeId)
return nil
}
func (dc *OriginDiscoveryClient) OnInit() error {
dc.RegNodeConnListener(dc)
dc.RegNatsConnListener(dc)
dc.mapDiscovery = map[string]map[string][]string{}
//dc.mapMasterNetwork = map[string]string{}
return nil
}
func (dc *OriginDiscoveryClient) addMasterNode(masterNodeId string, nodeId string,serviceList []string) {
_, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
dc.mapDiscovery[masterNodeId] = map[string][]string{}
}
dc.mapDiscovery[masterNodeId][nodeId] = serviceList
}
func (dc *OriginDiscoveryClient) getNodePublicService(masterNodeId string,nodeId string) []string{
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return nil
}
publicService := mapNodeId[nodeId]
return publicService
}
func (dc *OriginDiscoveryClient) removeMasterNode(masterNodeId string, nodeId string) {
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return
}
delete(mapNodeId, nodeId)
}
func (dc *OriginDiscoveryClient) findNodeId(nodeId string) bool {
for _, mapNodeId := range dc.mapDiscovery {
_, ok := mapNodeId[nodeId]
if ok == true {
return true
}
}
return false
}
func (dc *OriginDiscoveryClient) ping(){
interval := time.Duration(cluster.GetOriginDiscovery().TTLSecond)*time.Second
interval = interval /3
if interval < time.Second {
interval = time.Second
}
dc.NewTicker(interval,func(t *timer.Ticker){
if cluster.IsNatsMode() == false || dc.isRegisterOk == false{
return
}
var ping rpc.Ping
ping.NodeId = cluster.GetLocalNodeInfo().NodeId
masterNodes := GetCluster().GetOriginDiscovery().MasterNodeList
for i:=0;i<len(masterNodes);i++ {
if masterNodes[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
masterNodeId := masterNodes[i].NodeId
dc.AsyncCallNodeWithTimeout(3*time.Second,masterNodeId,RpcPingMethod,&ping, func(empty *rpc.Pong,err error) {
if err == nil && empty.Ok == false{
//the master no longer recognizes this node; re-register with it
dc.regServiceDiscover(masterNodeId)
}
})
}
})
}
func (dc *OriginDiscoveryClient) OnStart() {
//add and connect to the discovery master nodes
dc.addDiscoveryMaster()
dc.ping()
}
func (dc *OriginDiscoveryClient) addDiscoveryMaster() {
discoveryNodeList := cluster.GetOriginDiscovery()
for i := 0; i < len(discoveryNodeList.MasterNodeList); i++ {
if discoveryNodeList.MasterNodeList[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
dc.funSetNode(&discoveryNodeList.MasterNodeList[i])
}
}
func (dc *OriginDiscoveryClient) fullCompareDiffNode(masterNodeId string, mapNodeInfo map[string]*rpc.NodeInfo) []string {
if mapNodeInfo == nil {
return nil
}
diffNodeIdSlice := make([]string, 0, len(mapNodeInfo))
mapNodeId := map[string][]string{}
mapNodeId, ok := dc.mapDiscovery[masterNodeId]
if ok == false {
return nil
}
//locally known nodes missing from the incoming full list go into diffNodeIdSlice
for nodeId, _ := range mapNodeId {
_, ok := mapNodeInfo[nodeId]
if ok == false {
diffNodeIdSlice = append(diffNodeIdSlice, nodeId)
}
}
return diffNodeIdSlice
}
//handle the service discovery subscription notification
func (dc *OriginDiscoveryClient) RPC_SubServiceDiscover(req *rpc.SubscribeDiscoverNotify) error {
mapNodeInfo := map[string]*rpc.NodeInfo{}
for _, nodeInfo := range req.NodeInfo {
//skip the local node and nodes that expose nothing but the discovery client service
if nodeInfo.NodeId == dc.localNodeId {
continue
}
if cluster.IsOriginMasterDiscoveryNode(cluster.GetLocalNodeInfo().NodeId) == false && len(nodeInfo.PublicServiceList) == 1 &&
nodeInfo.PublicServiceList[0] == OriginDiscoveryClientName {
continue
}
//iterate over all public services and filter them
for _, serviceName := range nodeInfo.PublicServiceList {
nInfo := mapNodeInfo[nodeInfo.NodeId]
if nInfo == nil {
nInfo = &rpc.NodeInfo{}
nInfo.NodeId = nodeInfo.NodeId
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
nInfo.Retire = nodeInfo.Retire
nInfo.Private = nodeInfo.Private
mapNodeInfo[nodeInfo.NodeId] = nInfo
}
nInfo.PublicServiceList = append(nInfo.PublicServiceList, serviceName)
}
}
//for a full sync, work out which nodes differ
var willDelNodeId []string
if req.IsFull == true {
diffNode := dc.fullCompareDiffNode(req.MasterNodeId, mapNodeInfo)
if len(diffNode) > 0 {
willDelNodeId = append(willDelNodeId, diffNode...)
}
}
//a specific node was asked to be deleted
if req.DelNodeId != rpc.NodeIdNull && req.DelNodeId != dc.localNodeId {
willDelNodeId = append(willDelNodeId, req.DelNodeId)
}
//delete the nodes that are no longer needed
for _, nodeId := range willDelNodeId {
dc.funDelNode(nodeId)
dc.removeMasterNode(req.MasterNodeId, nodeId)
}
//set up the new nodes
for _, nodeInfo := range mapNodeInfo {
dc.addMasterNode(req.MasterNodeId, nodeInfo.NodeId,nodeInfo.PublicServiceList)
bSet := dc.setNodeInfo(req.MasterNodeId,nodeInfo)
if bSet == false {
continue
}
}
return nil
}
func (dc *OriginDiscoveryClient) OnNodeConnected(nodeId string) {
dc.regServiceDiscover(nodeId)
}
func (dc *OriginDiscoveryClient) OnRelease(){
log.Debug("OriginDiscoveryClient")
//unregister from the masters
var nodeRetireReq rpc.UnRegServiceDiscoverReq
nodeRetireReq.NodeId = cluster.GetLocalNodeInfo().NodeId
masterNodeList := cluster.GetOriginDiscovery()
for i:=0;i<len(masterNodeList.MasterNodeList);i++{
if masterNodeList.MasterNodeList[i].NodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
err := dc.CallNodeWithTimeout(3*time.Second,masterNodeList.MasterNodeList[i].NodeId,UnRegServiceDiscover,&nodeRetireReq,&rpc.Empty{})
if err!= nil {
log.Error("call "+UnRegServiceDiscover+" is fail",log.ErrorAttr("err",err))
}
}
}
func (dc *OriginDiscoveryClient) OnRetire(){
dc.bRetire = true
masterNodeList := cluster.GetOriginDiscovery()
for i:=0;i<len(masterNodeList.MasterNodeList);i++{
var nodeRetireReq rpc.NodeRetireReq
nodeRetireReq.NodeInfo = &rpc.NodeInfo{}
nodeRetireReq.NodeInfo.NodeId = cluster.localNodeInfo.NodeId
nodeRetireReq.NodeInfo.ListenAddr = cluster.localNodeInfo.ListenAddr
nodeRetireReq.NodeInfo.MaxRpcParamLen = cluster.localNodeInfo.MaxRpcParamLen
nodeRetireReq.NodeInfo.PublicServiceList = cluster.localNodeInfo.PublicServiceList
nodeRetireReq.NodeInfo.Retire = dc.bRetire
nodeRetireReq.NodeInfo.Private = cluster.localNodeInfo.Private
err := dc.GoNode(masterNodeList.MasterNodeList[i].NodeId,NodeRetireRpcMethod,&nodeRetireReq)
if err!= nil {
log.Error("call "+NodeRetireRpcMethod+" is fail",log.ErrorAttr("err",err))
}
}
}
func (dc *OriginDiscoveryClient) tryRegServiceDiscover(nodeId string){
dc.AfterFunc(time.Second*3, func(timer *timer.Timer) {
dc.regServiceDiscover(nodeId)
})
}
func (dc *OriginDiscoveryClient) regServiceDiscover(nodeId string){
if nodeId == cluster.GetLocalNodeInfo().NodeId {
return
}
nodeInfo := cluster.getOriginMasterDiscoveryNodeInfo(nodeId)
if nodeInfo == nil {
return
}
var req rpc.RegServiceDiscoverReq
req.NodeInfo = &rpc.NodeInfo{}
req.NodeInfo.NodeId = cluster.localNodeInfo.NodeId
req.NodeInfo.ListenAddr = cluster.localNodeInfo.ListenAddr
req.NodeInfo.MaxRpcParamLen = cluster.localNodeInfo.MaxRpcParamLen
req.NodeInfo.PublicServiceList = cluster.localNodeInfo.PublicServiceList
req.NodeInfo.Retire = dc.bRetire
req.NodeInfo.Private = cluster.localNodeInfo.Private
log.Debug("regServiceDiscover",log.String("nodeId",nodeId))
//sync this node's service info to the master service
_,err := dc.AsyncCallNodeWithTimeout(3*time.Second,nodeId, RegServiceDiscover, &req, func(res *rpc.SubscribeDiscoverNotify, err error) {
if err != nil {
log.Error("call "+RegServiceDiscover+" is fail :"+ err.Error())
dc.tryRegServiceDiscover(nodeId)
return
}
dc.isRegisterOk = true
dc.RPC_SubServiceDiscover(res)
})
if err != nil {
log.Error("call "+ RegServiceDiscover+" is fail :"+ err.Error())
dc.tryRegServiceDiscover(nodeId)
}
}
func (dc *OriginDiscoveryClient) setNodeInfo(masterNodeId string,nodeInfo *rpc.NodeInfo) bool{
if nodeInfo == nil || nodeInfo.Private == true || nodeInfo.NodeId == dc.localNodeId {
return false
}
//filter down to the services this node cares about
var discoverServiceSlice = make([]string, 0, 24)
for _, pubService := range nodeInfo.PublicServiceList {
if cluster.CanDiscoveryService(masterNodeId,pubService) == true {
discoverServiceSlice = append(discoverServiceSlice,pubService)
}
}
if len(discoverServiceSlice) == 0 {
return false
}
var nInfo NodeInfo
nInfo.ServiceList = discoverServiceSlice
nInfo.PublicServiceList = discoverServiceSlice
nInfo.NodeId = nodeInfo.NodeId
nInfo.ListenAddr = nodeInfo.ListenAddr
nInfo.MaxRpcParamLen = nodeInfo.MaxRpcParamLen
nInfo.Retire = nodeInfo.Retire
nInfo.Private = nodeInfo.Private
dc.funSetNode(&nInfo)
return true
}
func (dc *OriginDiscoveryClient) OnNodeDisconnect(nodeId string) {
//clean up by discarding the node
cluster.DiscardNode(nodeId)
}
func (dc *OriginDiscoveryClient) InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error {
dc.localNodeId = localNodeId
dc.funDelNode = funDelNode
dc.funSetNode = funSetNode
return nil
}
func (cls *Cluster) checkOriginDiscovery(localNodeId string) (bool, bool) {
var localMaster bool //whether this node is a master node
var hasMaster bool //whether a master service is configured
//iterate over all nodes
for _, nodeInfo := range cls.discoveryInfo.Origin.MasterNodeList {
if nodeInfo.NodeId == localNodeId {
localMaster = true
}
hasMaster = true
}
//return the query result
return localMaster, hasMaster
}
func (cls *Cluster) AddDiscoveryService(serviceName string, bPublicService bool) {
addServiceList := append([]string{},serviceName)
cls.localNodeInfo.ServiceList = append(addServiceList,cls.localNodeInfo.ServiceList...)
if bPublicService {
cls.localNodeInfo.PublicServiceList = append(cls.localNodeInfo.PublicServiceList, serviceName)
}
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = map[string]struct{}{}
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
func (cls *Cluster) IsOriginMasterDiscoveryNode(nodeId string) bool {
return cls.getOriginMasterDiscoveryNodeInfo(nodeId) != nil
}
func (cls *Cluster) getOriginMasterDiscoveryNodeInfo(nodeId string) *NodeInfo {
if cls.discoveryInfo.Origin == nil {
return nil
}
for i := 0; i < len(cls.discoveryInfo.Origin.MasterNodeList); i++ {
if cls.discoveryInfo.Origin.MasterNodeList[i].NodeId == nodeId {
return &cls.discoveryInfo.Origin.MasterNodeList[i]
}
}
return nil
}
func (dc *OriginDiscoveryClient) OnNatsConnected(){
masterNodes := GetCluster().GetOriginDiscovery().MasterNodeList
for i:=0;i<len(masterNodes);i++ {
dc.regServiceDiscover(masterNodes[i].NodeId)
}
}
func (dc *OriginDiscoveryClient) OnNatsDisconnect(){
}
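The keep-alive timing above is driven entirely by Origin.TTLSecond (and only applies in Nats RPC mode): each client pings every master at TTL/3 (ping), the master sweeps for expired registrations at TTL/6 (checkTTL), and both intervals are floored at one second. A small sketch of that arithmetic, with an illustrative helper name:

// discoveryIntervals mirrors the interval math in ping() and checkTTL() above.
func discoveryIntervals(ttlSecond int64) (pingInterval, sweepInterval time.Duration) {
	ttl := time.Duration(ttlSecond) * time.Second
	pingInterval = ttl / 3      // client -> master keep-alive
	sweepInterval = ttl / 3 / 2 // master scan for expired nodes
	if pingInterval < time.Second {
		pingInterval = time.Second
	}
	if sweepInterval < time.Second {
		sweepInterval = time.Second
	}
	return
}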


@@ -2,23 +2,162 @@ package cluster
import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
jsoniter "github.com/json-iterator/go"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"errors"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
type EtcdList struct {
NetworkName []string
Endpoints []string
}
type EtcdDiscovery struct {
DialTimeoutMillisecond time.Duration
TTLSecond int64
EtcdList []EtcdList
}
type OriginDiscovery struct {
TTLSecond int64
MasterNodeList []NodeInfo
}
type DiscoveryType int
const (
InvalidType = 0
OriginType = 1
EtcdType = 2
)
const MinTTL = 3
type DiscoveryInfo struct {
discoveryType DiscoveryType
Etcd *EtcdDiscovery //etcd
Origin *OriginDiscovery //origin
}
type NatsConfig struct {
NatsUrl string
NoRandomize bool
}
type RpcMode struct {
Typ string `json:"Type"`
Nats NatsConfig
}
type NodeInfoList struct {
MasterDiscoveryNode []NodeInfo //nodes used for service discovery
RpcMode RpcMode
Discovery DiscoveryInfo
NodeList []NodeInfo
}
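These structs describe the on-disk cluster configuration. Purely as an illustration, here is a hypothetical config fragment and how it decodes through the package-level json (jsoniter) variable declared above; the NodeInfo JSON keys ("NodeId", "ListenAddr") are inferred from how the fields are used elsewhere in this diff:

// exampleParseClusterCfg decodes a hypothetical cluster config fragment into NodeInfoList.
func exampleParseClusterCfg() (*NodeInfoList, error) {
	raw := []byte(`{
		"RpcMode": { "Type": "Default" },
		"Discovery": {
			"Origin": {
				"TTLSecond": 9,
				"MasterNodeList": [
					{ "NodeId": "Node_Master1", "ListenAddr": "127.0.0.1:8801" }
				]
			}
		},
		"NodeList": [
			{ "NodeId": "Node_1", "ListenAddr": "127.0.0.1:8802" }
		]
	}`)

	var c NodeInfoList
	if err := json.Unmarshal(raw, &c); err != nil {
		return nil, err
	}
	return &c, nil
}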
func (d *DiscoveryInfo) getDiscoveryType() DiscoveryType{
return d.discoveryType
}
func (d *DiscoveryInfo) setDiscovery(discoveryInfo *DiscoveryInfo) error{
var err error
err = d.setOrigin(discoveryInfo.Origin)
if err != nil {
return err
}
err = d.setEtcd(discoveryInfo.Etcd)
if err != nil {
return err
}
return nil
}
func (d *DiscoveryInfo) setEtcd(etcd *EtcdDiscovery) error{
if etcd == nil {
return nil
}
if d.discoveryType != InvalidType {
return fmt.Errorf("Repeat configuration of Discovery")
}
//duplicate Endpoints are not allowed
mapAddr:=make (map[string]struct{})
for _, n := range etcd.EtcdList {
for _,endPoint := range n.Endpoints {
if _,ok:=mapAddr[endPoint];ok == true {
return fmt.Errorf("etcd discovery config Etcd.EtcdList.Endpoints %+v is repeat",endPoint)
}
mapAddr[endPoint] = struct{}{}
}
//duplicate network names are not allowed
mapNetworkName := make(map[string]struct{})
for _,netName := range n.NetworkName{
if _,ok := mapNetworkName[netName];ok == true {
return fmt.Errorf("etcd discovery config Etcd.EtcdList.NetworkName %+v is repeat",n.NetworkName)
}
mapNetworkName[netName] = struct{}{}
}
}
if etcd.TTLSecond < MinTTL {
etcd.TTLSecond = MinTTL
}
etcd.DialTimeoutMillisecond = etcd.DialTimeoutMillisecond * time.Millisecond
d.Etcd = etcd
d.discoveryType = EtcdType
return nil
}
func (d *DiscoveryInfo) setOrigin(originDiscovery *OriginDiscovery) error{
if originDiscovery== nil || len(originDiscovery.MasterNodeList)==0 {
return nil
}
if d.discoveryType != InvalidType {
return fmt.Errorf("Repeat configuration of Discovery")
}
mapListenAddr := make(map[string]struct{})
mapNodeId := make(map[string]struct{})
for _, n := range originDiscovery.MasterNodeList {
if _, ok := mapListenAddr[n.ListenAddr]; ok == true {
return fmt.Errorf("discovery config Origin.ListenAddr %s is repeat", n.ListenAddr)
}
mapListenAddr[n.ListenAddr] = struct{}{}
if _, ok := mapNodeId[n.NodeId]; ok == true {
return fmt.Errorf("discovery config Origin.NodeId %s is repeat", n.NodeId)
}
mapNodeId[n.NodeId] = struct{}{}
}
d.Origin = originDiscovery
if d.Origin.TTLSecond < MinTTL {
d.Origin.TTLSecond = MinTTL
}
d.discoveryType = OriginType
return nil
}
func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
c := &NodeInfoList{}
d, err := ioutil.ReadFile(filepath)
d, err := os.ReadFile(filepath)
if err != nil {
return nil, err
}
@@ -30,10 +169,10 @@ func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
return c, nil
}
func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]interface{}, map[int]map[string]interface{}, error) {
func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]interface{}, map[string]map[string]interface{}, error) {
c := map[string]interface{}{}
//read the config file
d, err := ioutil.ReadFile(filepath)
d, err := os.ReadFile(filepath)
if err != nil {
return nil, nil, nil, err
}
@@ -49,7 +188,7 @@ func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]
serviceConfig = serviceCfg.(map[string]interface{})
}
mapNodeService := map[int]map[string]interface{}{}
mapNodeService := map[string]map[string]interface{}{}
nodeServiceCfg, ok := c["NodeService"]
if ok == true {
nodeServiceList := nodeServiceCfg.([]interface{})
@@ -57,42 +196,81 @@ func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]
serviceCfg := v.(map[string]interface{})
nodeId, ok := serviceCfg["NodeId"]
if ok == false {
log.SFatal("NodeService list not find nodeId field")
log.Fatal("NodeService list not find nodeId field")
}
mapNodeService[int(nodeId.(float64))] = serviceCfg
mapNodeService[nodeId.(string)] = serviceCfg
}
}
return GlobalCfg, serviceConfig, mapNodeService, nil
}
func (cls *Cluster) readLocalClusterConfig(nodeId int) ([]NodeInfo, []NodeInfo, error) {
func (cls *Cluster) SetRpcMode(cfgRpcMode *RpcMode,rpcMode *RpcMode) error {
//ignore configs where RpcMode is not set
if cfgRpcMode.Typ == "" {
return nil
}
//RpcMode must not be configured more than once
if cfgRpcMode.Typ != "" && rpcMode.Typ != ""{
return errors.New("repeat config RpcMode")
}
//check that Typ is one of the supported modes
if cfgRpcMode.Typ!="Nats" && cfgRpcMode.Typ!="Default" {
return fmt.Errorf("RpcMode %s is not supported", cfgRpcMode.Typ)
}
if cfgRpcMode.Typ == "Nats" && len(cfgRpcMode.Nats.NatsUrl)==0 {
return fmt.Errorf("nats rpc mode config NatsUrl is empty")
}
*rpcMode = *cfgRpcMode
return nil
}
func (cls *Cluster) readLocalClusterConfig(nodeId string) (DiscoveryInfo, []NodeInfo,RpcMode, error) {
var nodeInfoList []NodeInfo
var masterDiscoverNodeList []NodeInfo
var discoveryInfo DiscoveryInfo
var rpcMode RpcMode
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := ioutil.ReadDir(clusterCfgPath)
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return nil, nil, fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
return discoveryInfo, nil,rpcMode, fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
}
//read every file in the directory; only configs matching the expected format are used, so the config may be split across files freely
for _, f := range fileInfoList {
if f.IsDir() == false {
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
localNodeInfoList, err := cls.ReadClusterConfig(filePath)
if err != nil {
return nil, nil, fmt.Errorf("read file path %s is error:%+v", filePath, err)
fileNodeInfoList, rerr := cls.ReadClusterConfig(filePath)
if rerr != nil {
return discoveryInfo, nil,rpcMode, fmt.Errorf("read file path %s is error:%+v", filePath, rerr)
}
masterDiscoverNodeList = append(masterDiscoverNodeList, localNodeInfoList.MasterDiscoveryNode...)
for _, nodeInfo := range localNodeInfoList.NodeList {
if nodeInfo.NodeId == nodeId || nodeId == 0 {
err = cls.SetRpcMode(&fileNodeInfoList.RpcMode,&rpcMode)
if err != nil {
return discoveryInfo, nil,rpcMode, err
}
err = discoveryInfo.setDiscovery(&fileNodeInfoList.Discovery)
if err != nil {
return discoveryInfo,nil,rpcMode,err
}
for _, nodeInfo := range fileNodeInfoList.NodeList {
if nodeInfo.NodeId == nodeId || nodeId == rpc.NodeIdNull {
nodeInfoList = append(nodeInfoList, nodeInfo)
}
}
}
}
if nodeId != 0 && (len(nodeInfoList) != 1) {
return nil, nil, fmt.Errorf("%d configurations were found for the configuration with node ID %d!", len(nodeInfoList), nodeId)
if nodeId != rpc.NodeIdNull && (len(nodeInfoList) != 1) {
return discoveryInfo, nil,rpcMode, fmt.Errorf("nodeid %s configuration error in NodeList", nodeId)
}
for i, _ := range nodeInfoList {
@@ -106,98 +284,156 @@ func (cls *Cluster) readLocalClusterConfig(nodeId int) ([]NodeInfo, []NodeInfo,
}
}
return masterDiscoverNodeList, nodeInfoList, nil
return discoveryInfo, nodeInfoList, rpcMode,nil
}
func (cls *Cluster) readLocalService(localNodeId int) error {
func (cls *Cluster) readLocalService(localNodeId string) error {
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := ioutil.ReadDir(clusterCfgPath)
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
}
var globalCfg interface{}
publicService := map[string]interface{}{}
nodeService := map[string]interface{}{}
//read every file in the directory; only configs matching the expected format are used, so the config may be split across files freely
for _, f := range fileInfoList {
if f.IsDir() == false {
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
if f.IsDir() == true {
continue
}
if filepath.Ext(f.Name())!= ".json" {
continue
}
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
currGlobalCfg, serviceConfig, mapNodeService, err := cls.readServiceConfig(filePath)
if err != nil {
continue
if err != nil {
continue
}
if currGlobalCfg != nil {
//the [Global] config must not be repeated
if globalCfg != nil {
return fmt.Errorf("[Global] does not allow repeated configuration in %s.",f.Name())
}
globalCfg = currGlobalCfg
}
if currGlobalCfg != nil {
cls.globalCfg = currGlobalCfg
}
for _, s := range cls.localNodeInfo.ServiceList {
for {
//get the public service config
pubCfg, ok := serviceConfig[s]
if ok == true {
cls.localServiceCfg[s] = pubCfg
//save the public config
for _, s := range cls.localNodeInfo.ServiceList {
for {
splitServiceName := strings.Split(s,":")
if len(splitServiceName) == 2 {
s = splitServiceName[0]
}
//get the public service config
pubCfg, ok := serviceConfig[s]
if ok == true {
if _,publicOk := publicService[s];publicOk == true {
return fmt.Errorf("public service [%s] does not allow repeated configuration in %s.",s,f.Name())
}
publicService[s] = pubCfg
}
//if the node configures this service itself, override the public config
nodeService, ok := mapNodeService[localNodeId]
if ok == false {
break
}
sCfg, ok := nodeService[s]
if ok == false {
break
}
cls.localServiceCfg[s] = sCfg
//get the service config specific to this node
nodeServiceCfg,ok := mapNodeService[localNodeId]
if ok == false {
break
}
nodeCfg, ok := nodeServiceCfg[s]
if ok == false {
break
}
if _,nodeOK := nodeService[s];nodeOK == true {
return fmt.Errorf("NodeService NodeId[%d] Service[%s] does not allow repeated configuration in %s.",cls.localNodeInfo.NodeId,s,f.Name())
}
nodeService[s] = nodeCfg
break
}
}
}
//combine all the configs
for _, s := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(s,":")
if len(splitServiceName) == 2 {
s = splitServiceName[0]
}
//look in NodeService first
var serviceCfg interface{}
var ok bool
serviceCfg,ok = nodeService[s]
if ok == true {
cls.localServiceCfg[s] =serviceCfg
continue
}
//if not found there, fall back to PublicService
serviceCfg,ok = publicService[s]
if ok == true {
cls.localServiceCfg[s] =serviceCfg
}
}
cls.globalCfg = globalCfg
return nil
}
func (cls *Cluster) parseLocalCfg() {
cls.mapIdNode[cls.localNodeInfo.NodeId] = cls.localNodeInfo
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = cls.localNodeInfo
rpcInfo.client = rpc.NewLClient(rpcInfo.nodeInfo.NodeId,&cls.callSet)
for _, sName := range cls.localNodeInfo.ServiceList {
if _, ok := cls.mapServiceNode[sName]; ok == false {
cls.mapServiceNode[sName] = make(map[int]struct{})
}
cls.mapRpc[cls.localNodeInfo.NodeId] = &rpcInfo
cls.mapServiceNode[sName][cls.localNodeInfo.NodeId] = struct{}{}
}
}
func (cls *Cluster) checkDiscoveryNodeList(discoverMasterNode []NodeInfo) bool {
for i := 0; i < len(discoverMasterNode)-1; i++ {
for j := i + 1; j < len(discoverMasterNode); j++ {
if discoverMasterNode[i].NodeId == discoverMasterNode[j].NodeId ||
discoverMasterNode[i].ListenAddr == discoverMasterNode[j].ListenAddr {
return false
for _, serviceName := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
//record the template mapping
if _, ok := cls.mapTemplateServiceNode[templateServiceName]; ok == false {
cls.mapTemplateServiceNode[templateServiceName]=map[string]struct{}{}
}
cls.mapTemplateServiceNode[templateServiceName][serviceName] = struct{}{}
}
}
return true
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[string]struct{})
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
}
func (cls *Cluster) InitCfg(localNodeId int) error {
func (cls *Cluster) IsNatsMode() bool {
return cls.rpcMode.Typ == "Nats"
}
func (cls *Cluster) GetNatsUrl() string {
return cls.rpcMode.Nats.NatsUrl
}
func (cls *Cluster) InitCfg(localNodeId string) error {
cls.localServiceCfg = map[string]interface{}{}
cls.mapRpc = map[int]NodeRpcInfo{}
cls.mapIdNode = map[int]NodeInfo{}
cls.mapServiceNode = map[string]map[int]struct{}{}
cls.mapRpc = map[string]*NodeRpcInfo{}
cls.mapServiceNode = map[string]map[string]struct{}{}
cls.mapTemplateServiceNode = map[string]map[string]struct{}{}
//load the NodeList config for the local node
discoveryNode, nodeInfoList, err := cls.readLocalClusterConfig(localNodeId)
discoveryInfo, nodeInfoList,rpcMode, err := cls.readLocalClusterConfig(localNodeId)
if err != nil {
return err
}
cls.localNodeInfo = nodeInfoList[0]
if cls.checkDiscoveryNodeList(discoveryNode) == false {
return fmt.Errorf("DiscoveryNode config is error!")
}
cls.masterDiscoveryNodeList = discoveryNode
cls.discoveryInfo = discoveryInfo
cls.rpcMode = rpcMode
//read the local service config
err = cls.readLocalService(localNodeId)
@@ -222,35 +458,54 @@ func (cls *Cluster) IsConfigService(serviceName string) bool {
return ok
}
func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.Client, bAll bool) (error, int) {
func (cls *Cluster) GetNodeIdByTemplateService(templateServiceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
cls.locker.RLock()
defer cls.locker.RUnlock()
mapServiceName := cls.mapTemplateServiceNode[templateServiceName]
for serviceName := range mapServiceName {
mapNodeId, ok := cls.mapServiceNode[serviceName]
if ok == true {
for nodeId, _ := range mapNodeId {
pClient,retire := GetCluster().getRpcClient(nodeId)
if pClient == nil || pClient.IsConnected() == false {
continue
}
//if retired nodes should be filtered out, skip nodes in retire state
if filterRetire == true && retire == true {
continue
}
rpcClientList = append(rpcClientList,pClient)
}
}
}
return nil, rpcClientList
}
func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
cls.locker.RLock()
defer cls.locker.RUnlock()
mapNodeId, ok := cls.mapServiceNode[serviceName]
count := 0
if ok == true {
for nodeId, _ := range mapNodeId {
pClient := GetCluster().getRpcClient(nodeId)
if pClient == nil || (bAll == false && pClient.IsConnected() == false) {
pClient,retire := GetCluster().getRpcClient(nodeId)
if pClient == nil || pClient.IsConnected() == false {
continue
}
rpcClientList[count] = pClient
count++
if count >= cap(rpcClientList) {
break
//if retired nodes should be filtered out, skip nodes in retire state
if filterRetire == true && retire == true {
continue
}
rpcClientList = append(rpcClientList,pClient)
}
}
return nil, count
}
func (cls *Cluster) getServiceCfg(serviceName string) interface{} {
v, ok := cls.localServiceCfg[serviceName]
if ok == false {
return nil
}
return v
return nil, rpcClientList
}
func (cls *Cluster) GetServiceCfg(serviceName string) interface{} {


@@ -1,12 +1,12 @@
package cluster
type OperType int
type FunDelNode func (nodeId int,immediately bool)
type FunSetNodeInfo func(nodeInfo *NodeInfo)
type FunDelNode func (nodeId string)
type FunSetNode func(nodeInfo *NodeInfo)
type IServiceDiscovery interface {
InitDiscovery(localNodeId int,funDelNode FunDelNode,funSetNodeInfo FunSetNodeInfo) error
OnNodeStop()
InitDiscovery(localNodeId string,funDelNode FunDelNode,funSetNodeInfo FunSetNode) error
}
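Assuming the v2 interface really is reduced to the single InitDiscovery method shown here, a custom discovery backend only has to feed node changes through the two callbacks. A hypothetical, minimal implementation that announces a fixed node list once at init:

// staticDiscovery is a hypothetical IServiceDiscovery backend with a hard-coded node list.
type staticDiscovery struct {
	nodes []NodeInfo
}

func (s *staticDiscovery) InitDiscovery(localNodeId string, funDelNode FunDelNode, funSetNode FunSetNode) error {
	for i := range s.nodes {
		if s.nodes[i].NodeId == localNodeId {
			continue // the local node is already known to the cluster
		}
		funSetNode(&s.nodes[i]) // hand every remote node to the cluster layer
	}
	_ = funDelNode // a real backend would call this when a node disappears
	return nil
}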

concurrent/concurrent.go (new file)

@@ -0,0 +1,99 @@
package concurrent
import (
"errors"
"runtime"
"github.com/duanhf2012/origin/v2/log"
"sync/atomic"
)
const defaultMaxTaskChannelNum = 1000000
type IConcurrent interface {
OpenConcurrentByNumCPU(cpuMul float32)
OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32, maxTaskChannelNum int)
AsyncDoByQueue(queueId int64, fn func() bool, cb func(err error))
AsyncDo(f func() bool, cb func(err error))
}
type Concurrent struct {
dispatch
tasks chan task
cbChannel chan func(error)
open int32
}
/*
cpuMul is a multiplier on the number of CPUs.
Suggested values: (1) CPU-bound work: 1  (2) I/O-bound work: 2 or higher.
*/
func (c *Concurrent) OpenConcurrentByNumCPU(cpuNumMul float32) {
goroutineNum := int32(float32(runtime.NumCPU())*cpuNumMul + 1)
c.OpenConcurrent(goroutineNum, goroutineNum, defaultMaxTaskChannelNum)
}
func (c *Concurrent) OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32, maxTaskChannelNum int) {
if atomic.AddInt32(&c.open,1) > 1 {
panic("repeated calls to OpenConcurrent are not allowed!")
}
c.tasks = make(chan task, maxTaskChannelNum)
c.cbChannel = make(chan func(error), maxTaskChannelNum)
//open the dispatcher
c.dispatch.open(minGoroutineNum, maxGoroutineNum, c.tasks, c.cbChannel)
}
func (c *Concurrent) AsyncDo(f func() bool, cb func(err error)) {
c.AsyncDoByQueue(0, f, cb)
}
func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func() bool, cb func(err error)) {
if cap(c.tasks) == 0 {
panic("not open concurrent")
}
if fn == nil && cb == nil {
log.Stack("fn and cb is nil")
return
}
if fn == nil {
c.pushAsyncDoCallbackEvent(cb)
return
}
if queueId != 0 {
queueId = queueId % maxTaskQueueSessionId+1
}
select {
case c.tasks <- task{queueId, fn, cb}:
default:
log.Error("tasks channel is full")
if cb != nil {
c.pushAsyncDoCallbackEvent(func(err error) {
cb(errors.New("tasks channel is full"))
})
}
return
}
}
func (c *Concurrent) Close() {
if cap(c.tasks) == 0 {
return
}
log.Info("wait close concurrent")
c.dispatch.close()
log.Info("concurrent has successfully exited")
}
func (c *Concurrent) GetCallBackChannel() chan func(error) {
return c.cbChannel
}
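A minimal standalone usage sketch of Concurrent. Note that callbacks are not invoked automatically: whoever owns the Concurrent (in origin, presumably the hosting service's loop) has to read them from GetCallBackChannel and execute them. Names and values below are illustrative only:

func exampleConcurrent() {
	var c Concurrent
	c.OpenConcurrentByNumCPU(1) // CPU-bound work: roughly one worker per core

	c.AsyncDoByQueue(42, func() bool {
		// runs on a worker goroutine; tasks sharing a queueId are executed in order
		return true // true => the callback below is delivered
	}, func(err error) {
		// runs wherever the callback channel is drained
	})

	// Drain one callback by hand and execute it.
	cb := <-c.GetCallBackChannel()
	cb(nil)
}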

concurrent/dispatch.go (new file)

@@ -0,0 +1,195 @@
package concurrent
import (
"sync"
"sync/atomic"
"time"
"fmt"
"runtime"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/queue"
)
var idleTimeout = int64(2 * time.Second)
const maxTaskQueueSessionId = 10000
type dispatch struct {
minConcurrentNum int32
maxConcurrentNum int32
queueIdChannel chan int64
workerQueue chan task
tasks chan task
idle bool
workerNum int32
cbChannel chan func(error)
mapTaskQueueSession map[int64]*queue.Deque[task]
waitWorker sync.WaitGroup
waitDispatch sync.WaitGroup
}
func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan task, cbChannel chan func(error)) {
d.minConcurrentNum = minGoroutineNum
d.maxConcurrentNum = maxGoroutineNum
d.tasks = tasks
d.mapTaskQueueSession = make(map[int64]*queue.Deque[task], maxTaskQueueSessionId)
d.workerQueue = make(chan task)
d.cbChannel = cbChannel
d.queueIdChannel = make(chan int64, cap(tasks))
d.waitDispatch.Add(1)
go d.run()
}
func (d *dispatch) run() {
defer d.waitDispatch.Done()
timeout := time.NewTimer(time.Duration(atomic.LoadInt64(&idleTimeout)))
for {
select {
case queueId := <-d.queueIdChannel:
d.processqueueEvent(queueId)
default:
select {
case t, ok := <-d.tasks:
if ok == false {
return
}
d.processTask(&t)
case queueId := <-d.queueIdChannel:
d.processqueueEvent(queueId)
case <-timeout.C:
d.processTimer()
if atomic.LoadInt32(&d.minConcurrentNum) == -1 && len(d.tasks) == 0 {
atomic.StoreInt64(&idleTimeout,int64(time.Millisecond * 10))
}
timeout.Reset(time.Duration(atomic.LoadInt64(&idleTimeout)))
}
}
if atomic.LoadInt32(&d.minConcurrentNum) == -1 && d.workerNum == 0 {
d.waitWorker.Wait()
d.cbChannel <- nil
return
}
}
}
func (d *dispatch) processTimer() {
if d.idle == true && d.workerNum > atomic.LoadInt32(&d.minConcurrentNum) {
d.processIdle()
}
d.idle = true
}
func (d *dispatch) processqueueEvent(queueId int64) {
d.idle = false
queueSession := d.mapTaskQueueSession[queueId]
if queueSession == nil {
return
}
queueSession.PopFront()
if queueSession.Len() == 0 {
return
}
t := queueSession.Front()
d.executeTask(&t)
}
func (d *dispatch) executeTask(t *task) {
select {
case d.workerQueue <- *t:
return
default:
if d.workerNum < d.maxConcurrentNum {
var work worker
work.start(&d.waitWorker, t, d)
return
}
}
d.workerQueue <- *t
}
func (d *dispatch) processTask(t *task) {
d.idle = false
//handle tasks that belong to a queue
if t.queueId != 0 {
queueSession := d.mapTaskQueueSession[t.queueId]
if queueSession == nil {
queueSession = &queue.Deque[task]{}
d.mapTaskQueueSession[t.queueId] = queueSession
}
//if no task of this queue is currently running, execute it immediately
if queueSession.Len() == 0 {
d.executeTask(t)
}
queueSession.PushBack(*t)
return
}
//ordinary (unqueued) task
d.executeTask(t)
}
func (d *dispatch) processIdle() {
select {
case d.workerQueue <- task{}:
d.workerNum--
default:
}
}
func (d *dispatch) pushQueueTaskFinishEvent(queueId int64) {
d.queueIdChannel <- queueId
}
func (c *dispatch) pushAsyncDoCallbackEvent(cb func(err error)) {
if cb == nil {
//no callback required
return
}
c.cbChannel <- cb
}
func (d *dispatch) close() {
atomic.StoreInt32(&d.minConcurrentNum, -1)
breakFor:
for {
select {
case cb := <-d.cbChannel:
if cb == nil {
break breakFor
}
cb(nil)
}
}
d.waitDispatch.Wait()
}
func (d *dispatch) DoCallback(cb func(err error)) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
cb(nil)
}

concurrent/worker.go (new file)

@@ -0,0 +1,78 @@
package concurrent
import (
"sync"
"errors"
"fmt"
"runtime"
"github.com/duanhf2012/origin/v2/log"
)
type task struct {
queueId int64
fn func() bool
cb func(err error)
}
type worker struct {
*dispatch
}
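// isExistTask reports whether this is the empty sentinel task (fn == nil) that
// processIdle pushes to tell a worker goroutine to exit; the name presumably means "exit task".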
func (t *task) isExistTask() bool {
return t.fn == nil
}
func (w *worker) start(waitGroup *sync.WaitGroup, t *task, d *dispatch) {
w.dispatch = d
d.workerNum += 1
waitGroup.Add(1)
go w.run(waitGroup, *t)
}
func (w *worker) run(waitGroup *sync.WaitGroup, t task) {
defer waitGroup.Done()
w.exec(&t)
for {
select {
case tw := <-w.workerQueue:
if tw.isExistTask() {
//exit goroutine
log.Info("worker goroutine exit")
return
}
w.exec(&tw)
}
}
}
func (w *worker) exec(t *task) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
cb := t.cb
t.cb = func(err error) {
cb(errors.New(errString))
}
log.Dump(string(buf[:l]),log.String("error",errString))
w.endCallFun(true,t)
}
}()
w.endCallFun(t.fn(),t)
}
func (w *worker) endCallFun(isDocallBack bool,t *task) {
if isDocallBack {
w.pushAsyncDoCallbackEvent(t.cb)
}
if t.queueId != 0 {
w.pushQueueTaskFinishEvent(t.queueId)
}
}


@@ -11,8 +11,9 @@ type CommandFunctionCB func(args interface{}) error
var commandList []*command
var programName string
const(
boolType valueType = iota
stringType valueType = iota
boolType valueType = 0
stringType valueType = 1
intType valueType = 2
)
type command struct{
@@ -20,6 +21,7 @@ type command struct{
name string
bValue bool
strValue string
intValue int
usage string
fn CommandFunctionCB
}
@@ -29,6 +31,8 @@ func (cmd *command) execute() error{
return cmd.fn(cmd.bValue)
}else if cmd.valType == stringType {
return cmd.fn(cmd.strValue)
}else if cmd.valType == intType {
return cmd.fn(cmd.intValue)
}else{
return fmt.Errorf("Unknow command type.")
}
@@ -72,6 +76,16 @@ func RegisterCommandBool(cmdName string, defaultValue bool, usage string,fn Comm
commandList = append(commandList,&cmd)
}
func RegisterCommandInt(cmdName string, defaultValue int, usage string,fn CommandFunctionCB){
var cmd command
cmd.valType = intType
cmd.name = cmdName
cmd.fn = fn
cmd.usage = usage
flag.IntVar(&cmd.intValue, cmdName, defaultValue, usage)
commandList = append(commandList,&cmd)
}
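A hypothetical use of the new int command type (the flag name and handler are illustrative, not taken from the repository):

func registerExampleIntCommand() {
	RegisterCommandInt("pprofport", 0, "pprof listen port, 0 disables it", func(args interface{}) error {
		port := args.(int) // intType commands deliver their parsed value as an int
		if port > 0 {
			// start a debug listener here, for example
			_ = port
		}
		return nil
	})
}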
func RegisterCommandString(cmdName string, defaultValue string, usage string,fn CommandFunctionCB){
var cmd command
cmd.valType = stringType


@@ -2,12 +2,11 @@ package event
import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"runtime"
"sync"
)
//事件接受器
type EventCallBack func(event IEvent)
@@ -216,7 +215,7 @@ func (processor *EventProcessor) EventHandler(ev IEvent) {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.SError("core dump info[",errString,"]\n",string(buf[:l]))
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
@@ -229,16 +228,14 @@ func (processor *EventProcessor) EventHandler(ev IEvent) {
}
}
func (processor *EventProcessor) castEvent(event IEvent){
if processor.mapListenerEvent == nil {
log.SError("mapListenerEvent not init!")
log.Error("mapListenerEvent not init!")
return
}
eventProcessor,ok := processor.mapListenerEvent[event.GetEventType()]
if ok == false || processor == nil{
log.SDebug("event type ",event.GetEventType()," not listen.")
return
}
@@ -246,3 +243,4 @@ func (processor *EventProcessor) castEvent(event IEvent){
proc.PushEvent(event)
}
}

event/eventpool.go (new file)

@@ -0,0 +1,24 @@
package event
import "github.com/duanhf2012/origin/v2/util/sync"
// eventPool is a memory pool that caches Event objects
const defaultMaxEventChannelNum = 2000000
var eventPool = sync.NewPoolEx(make(chan sync.IPoolData, defaultMaxEventChannelNum), func() sync.IPoolData {
return &Event{}
})
func NewEvent() *Event{
return eventPool.Get().(*Event)
}
func DeleteEvent(event IEvent){
eventPool.Put(event.(sync.IPoolData))
}
func SetEventPoolSize(eventPoolSize int){
eventPool = sync.NewPoolEx(make(chan sync.IPoolData, eventPoolSize), func() sync.IPoolData {
return &Event{}
})
}
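Events taken from the pool are meant to be returned once fully handled, and since SetEventPoolSize replaces the pool outright it is presumably intended to be called once at startup, before the first NewEvent. A minimal sketch of the life cycle (the event payload is left out, as Event's fields are not part of this diff):

func exampleEventPool() {
	SetEventPoolSize(100000) // optional: resize before any events are taken

	ev := NewEvent() // reuse a pooled Event instead of allocating
	// ... fill in and dispatch ev ...
	DeleteEvent(ev) // return it to the pool once processing is finished
}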


@@ -7,11 +7,18 @@ const (
ServiceRpcRequestEvent EventType = -1
ServiceRpcResponseEvent EventType = -2
Sys_Event_Tcp EventType = -3
Sys_Event_Http_Event EventType = -4
Sys_Event_WebSocket EventType = -5
Sys_Event_Rpc_Event EventType = -6
Sys_Event_User_Define EventType = 1
Sys_Event_Tcp EventType = -3
Sys_Event_Http_Event EventType = -4
Sys_Event_WebSocket EventType = -5
Sys_Event_Node_Conn_Event EventType = -6
Sys_Event_Nats_Conn_Event EventType = -7
Sys_Event_DiscoverService EventType = -8
Sys_Event_DiscardGoroutine EventType = -9
Sys_Event_QueueTaskFinish EventType = -10
Sys_Event_Retire EventType = -11
Sys_Event_EtcdDiscovery EventType = -12
Sys_Event_Gin_Event EventType = -13
Sys_Event_User_Define EventType = 1
)

go.mod

@@ -1,13 +1,54 @@
module github.com/duanhf2012/origin
module github.com/duanhf2012/origin/v2
go 1.17
go 1.21
require (
github.com/go-sql-driver/mysql v1.5.0
github.com/golang/protobuf v1.4.3
github.com/gomodule/redigo v1.8.3
github.com/gorilla/websocket v1.4.2
github.com/json-iterator/go v1.1.10
google.golang.org/protobuf v1.25.0
github.com/go-sql-driver/mysql v1.6.0
github.com/gomodule/redigo v1.8.8
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.0
github.com/json-iterator/go v1.1.12
github.com/nats-io/nats.go v1.34.1
github.com/pierrec/lz4/v4 v4.1.18
github.com/shirou/gopsutil v3.21.11+incompatible
go.etcd.io/etcd/api/v3 v3.5.13
go.etcd.io/etcd/client/v3 v3.5.13
go.mongodb.org/mongo-driver v1.9.1
go.uber.org/zap v1.27.0
google.golang.org/protobuf v1.33.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
)
require (
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/klauspost/compress v1.17.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/tklauser/go-sysconf v0.3.13 // indirect
github.com/tklauser/numcpus v0.7.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
go.uber.org/multierr v1.10.0 // indirect
golang.org/x/crypto v0.18.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.59.0 // indirect
)

go.sum

@@ -1,95 +1,162 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc=
github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E=
github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/nats-io/nats.go v1.34.1 h1:syWey5xaNHZgicYBemv0nohUPPmaLteiBEUT6Q5+F/4=
github.com/nats-io/nats.go v1.34.1/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4=
go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c=
go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg=
go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8=
go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js=
go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI=
go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c=
go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -2,27 +2,20 @@ package log // import "go.uber.org/zap/buffer"
import (
"strconv"
"sync"
)
const _size = 9216
type Buffer struct {
bs []byte
mu sync.Mutex // ensures atomic writes; protects the following fields
//mu sync.Mutex // ensures atomic writes; protects the following fields
}
func (buff *Buffer) Init(){
buff.bs = make([]byte,_size)
}
func (buff *Buffer) Locker() {
buff.mu.Lock()
}
func (buff *Buffer) UnLocker() {
buff.mu.Unlock()
}
// AppendByte writes a single byte to the Buffer.
func (b *Buffer) AppendByte(v byte) {
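
As a hedged usage sketch of the lock-aware Buffer above: the type and its Init/Locker/UnLocker/AppendByte methods come straight from this hunk, while the import path (and its olog alias) is an assumption based on the v2 paths used elsewhere in this compare.

package main

import (
	olog "github.com/duanhf2012/origin/v2/log" // assumed import path, aliased to avoid clashing with the stdlib log
)

func main() {
	var buf olog.Buffer
	buf.Init()           // allocates the fixed 9216-byte backing slice (_size above)
	buf.Locker()         // callers serialize writes explicitly via the exposed locker
	buf.AppendByte('\n')
	buf.UnLocker()
}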

log/handler.go (new file, 147 lines)

@@ -0,0 +1,147 @@
package log
import (
"context"
"io"
"log/slog"
"path/filepath"
"runtime"
"runtime/debug"
"sync"
)
type IOriginHandler interface {
slog.Handler
Lock()
UnLock()
}
type BaseHandler struct {
addSource bool
w io.Writer
locker sync.Mutex
}
type OriginTextHandler struct {
BaseHandler
*slog.TextHandler
}
type OriginJsonHandler struct {
BaseHandler
*slog.JSONHandler
}
func getStrLevel(level slog.Level) string{
switch level {
case LevelTrace:
return "Trace"
case LevelDebug:
return "Debug"
case LevelInfo:
return "Info"
case LevelWarning:
return "Warning"
case LevelError:
return "Error"
case LevelStack:
return "Stack"
case LevelDump:
return "Dump"
case LevelFatal:
return "Fatal"
}
return ""
}
func defaultReplaceAttr(groups []string, a slog.Attr) slog.Attr {
if a.Key == slog.LevelKey {
level := a.Value.Any().(slog.Level)
a.Value = slog.StringValue(getStrLevel(level))
}else if a.Key == slog.TimeKey && len(groups) == 0 {
a.Value = slog.StringValue(a.Value.Time().Format("2006/01/02 15:04:05"))
}else if a.Key == slog.SourceKey {
source := a.Value.Any().(*slog.Source)
source.File = filepath.Base(source.File)
}
return a
}
func NewOriginTextHandler(level slog.Level,w io.Writer,addSource bool,replaceAttr func([]string,slog.Attr) slog.Attr) slog.Handler{
var textHandler OriginTextHandler
textHandler.addSource = addSource
textHandler.w = w
textHandler.TextHandler = slog.NewTextHandler(w,&slog.HandlerOptions{
AddSource: addSource,
Level: level,
ReplaceAttr: replaceAttr,
})
return &textHandler
}
func (oh *OriginTextHandler) Handle(context context.Context, record slog.Record) error{
oh.Fill(context,&record)
oh.locker.Lock()
defer oh.locker.Unlock()
if record.Level == LevelStack || record.Level == LevelFatal{
err := oh.TextHandler.Handle(context, record)
oh.logStack(&record)
return err
}else if record.Level == LevelDump {
strDump := record.Message
record.Message = "dump info"
err := oh.TextHandler.Handle(context, record)
oh.w.Write([]byte(strDump))
return err
}
return oh.TextHandler.Handle(context, record)
}
func (b *BaseHandler) logStack(record *slog.Record){
b.w.Write(debug.Stack())
}
func (b *BaseHandler) Lock(){
b.locker.Lock()
}
func (b *BaseHandler) UnLock(){
b.locker.Unlock()
}
func NewOriginJsonHandler(level slog.Level,w io.Writer,addSource bool,replaceAttr func([]string,slog.Attr) slog.Attr) slog.Handler{
var jsonHandler OriginJsonHandler
jsonHandler.addSource = addSource
jsonHandler.w = w
jsonHandler.JSONHandler = slog.NewJSONHandler(w,&slog.HandlerOptions{
AddSource: addSource,
Level: level,
ReplaceAttr: replaceAttr,
})
return &jsonHandler
}
func (oh *OriginJsonHandler) Handle(context context.Context, record slog.Record) error{
oh.Fill(context,&record)
if record.Level == LevelStack || record.Level == LevelFatal || record.Level == LevelDump{
record.Add("stack",debug.Stack())
}
oh.locker.Lock()
defer oh.locker.Unlock()
return oh.JSONHandler.Handle(context, record)
}
func (b *BaseHandler) Fill(context context.Context, record *slog.Record) {
if b.addSource {
var pcs [1]uintptr
runtime.Callers(7, pcs[:])
record.PC = pcs[0]
}
}
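
A minimal sketch of driving the new slog-based text handler through a plain *slog.Logger. The import path is the v2 path used in the other hunks (aliased here as olog); addSource is left false because Fill uses a fixed runtime.Callers skip depth tuned for the package's own wrappers, and a nil ReplaceAttr falls back to slog's default attribute formatting instead of defaultReplaceAttr.

package main

import (
	"log/slog"
	"os"

	olog "github.com/duanhf2012/origin/v2/log" // path as used elsewhere in this compare
)

func main() {
	// Text handler writing to stdout; source locations disabled (see note above).
	h := olog.NewOriginTextHandler(slog.LevelInfo, os.Stdout, false, nil)
	logger := slog.New(h)
	logger.Info("origin text handler sketch", slog.String("module", "log"))
}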

log/log.go (1007 lines)
File diff suppressed because it is too large.

@@ -2,7 +2,8 @@ package network
import (
"crypto/tls"
"github.com/duanhf2012/origin/log"
"errors"
"github.com/duanhf2012/origin/v2/log"
"net/http"
"time"
)
@@ -37,12 +38,16 @@ func (slf *HttpServer) Start() {
}
func (slf *HttpServer) startListen() error {
if slf.httpServer != nil {
return errors.New("Duplicate start not allowed")
}
var tlsCaList []tls.Certificate
var tlsConfig *tls.Config
for _, caFile := range slf.caFileList {
cer, err := tls.LoadX509KeyPair(caFile.CertFile, caFile.Keyfile)
if err != nil {
log.SFatal("Load CA [",caFile.CertFile,"]-[",caFile.Keyfile,"] file is fail:",err.Error())
log.Fatal("Load CA file is fail",log.String("error",err.Error()),log.String("certFile",caFile.CertFile),log.String("keyFile",caFile.Keyfile))
return err
}
tlsCaList = append(tlsCaList, cer)
@@ -69,7 +74,7 @@ func (slf *HttpServer) startListen() error {
}
if err != nil {
log.SFatal("Listen for address ",slf.listenAddr," failure:",err.Error())
log.Fatal("Listen failure",log.String("error",err.Error()),log.String("addr:",slf.listenAddr))
return err
}


@@ -3,7 +3,8 @@ package processor
import (
"encoding/json"
"fmt"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/bytespool"
"reflect"
)
@@ -12,9 +13,9 @@ type MessageJsonInfo struct {
msgHandler MessageJsonHandler
}
type MessageJsonHandler func(clientId uint64,msg interface{})
type ConnectJsonHandler func(clientId uint64)
type UnknownMessageJsonHandler func(clientId uint64,msg []byte)
type MessageJsonHandler func(clientId string,msg interface{})
type ConnectJsonHandler func(clientId string)
type UnknownMessageJsonHandler func(clientId string,msg []byte)
type JsonProcessor struct {
mapMsg map[uint16]MessageJsonInfo
@@ -23,7 +24,7 @@ type JsonProcessor struct {
unknownMessageHandler UnknownMessageJsonHandler
connectHandler ConnectJsonHandler
disconnectHandler ConnectJsonHandler
network.INetMempool
bytespool.IBytesMempool
}
type JsonPackInfo struct {
@@ -34,7 +35,7 @@ type JsonPackInfo struct {
func NewJsonProcessor() *JsonProcessor {
processor := &JsonProcessor{mapMsg:map[uint16]MessageJsonInfo{}}
processor.INetMempool = network.NewMemAreaPool()
processor.IBytesMempool = bytespool.NewMemAreaPool()
return processor
}
@@ -44,20 +45,20 @@ func (jsonProcessor *JsonProcessor) SetByteOrder(littleEndian bool) {
}
// must goroutine safe
func (jsonProcessor *JsonProcessor ) MsgRoute(msg interface{},userdata interface{}) error{
func (jsonProcessor *JsonProcessor ) MsgRoute(clientId string,msg interface{}) error{
pPackInfo := msg.(*JsonPackInfo)
v,ok := jsonProcessor.mapMsg[pPackInfo.typ]
if ok == false {
return fmt.Errorf("cannot find msgtype %d is register!",pPackInfo.typ)
}
v.msgHandler(userdata.(uint64),pPackInfo.msg)
v.msgHandler(clientId,pPackInfo.msg)
return nil
}
func (jsonProcessor *JsonProcessor) Unmarshal(data []byte) (interface{}, error) {
func (jsonProcessor *JsonProcessor) Unmarshal(clientId string,data []byte) (interface{}, error) {
typeStruct := struct {Type int `json:"typ"`}{}
defer jsonProcessor.ReleaseByteSlice(data)
defer jsonProcessor.ReleaseBytes(data)
err := json.Unmarshal(data, &typeStruct)
if err != nil {
return nil, err
@@ -78,7 +79,7 @@ func (jsonProcessor *JsonProcessor) Unmarshal(data []byte) (interface{}, error)
return &JsonPackInfo{typ:msgType,msg:msgData},nil
}
func (jsonProcessor *JsonProcessor) Marshal(msg interface{}) ([]byte, error) {
func (jsonProcessor *JsonProcessor) Marshal(clientId string,msg interface{}) ([]byte, error) {
rawMsg,err := json.Marshal(msg)
if err != nil {
return nil,err
@@ -103,16 +104,26 @@ func (jsonProcessor *JsonProcessor) MakeRawMsg(msgType uint16,msg []byte) *JsonP
return &JsonPackInfo{typ:msgType,rawMsg:msg}
}
func (jsonProcessor *JsonProcessor) UnknownMsgRoute(msg interface{}, userData interface{}){
jsonProcessor.unknownMessageHandler(userData.(uint64),msg.([]byte))
func (jsonProcessor *JsonProcessor) UnknownMsgRoute(clientId string,msg interface{}){
if jsonProcessor.unknownMessageHandler==nil {
log.Debug("Unknown message",log.String("clientId",clientId))
return
}
jsonProcessor.unknownMessageHandler(clientId,msg.([]byte))
}
func (jsonProcessor *JsonProcessor) ConnectedRoute(userData interface{}){
jsonProcessor.connectHandler(userData.(uint64))
func (jsonProcessor *JsonProcessor) ConnectedRoute(clientId string){
if jsonProcessor.connectHandler != nil {
jsonProcessor.connectHandler(clientId)
}
}
func (jsonProcessor *JsonProcessor) DisConnectedRoute(userData interface{}){
jsonProcessor.disconnectHandler(userData.(uint64))
func (jsonProcessor *JsonProcessor) DisConnectedRoute(clientId string){
if jsonProcessor.disconnectHandler != nil {
jsonProcessor.disconnectHandler(clientId)
}
}
func (jsonProcessor *JsonProcessor) RegisterUnknownMsg(unknownMessageHandler UnknownMessageJsonHandler){
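
A hedged fragment showing the string-keyed JSON processor API in use. Only the constructor, SetByteOrder and RegisterUnknownMsg calls visible in this file are used; the processor and log import paths are assumptions (the hunks only show the package names).

package main

import (
	"github.com/duanhf2012/origin/v2/log"
	"github.com/duanhf2012/origin/v2/network/processor" // assumed path; the hunk only shows "package processor"
)

func main() {
	p := processor.NewJsonProcessor()
	p.SetByteOrder(true)
	// clientId is now a string everywhere, matching the wsservice/tcpservice change.
	p.RegisterUnknownMsg(func(clientId string, msg []byte) {
		log.Warning("unhandled json message", log.String("clientId", clientId), log.Int("len", len(msg)))
	})
}

In a real service the processor would then be handed to the tcpservice or wsservice module, which now tracks clients by these string ids.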


@@ -3,8 +3,8 @@ package processor
import (
"encoding/binary"
"fmt"
"github.com/duanhf2012/origin/network"
"github.com/gogo/protobuf/proto"
"github.com/duanhf2012/origin/v2/util/bytespool"
"google.golang.org/protobuf/proto"
"reflect"
)
@@ -13,9 +13,9 @@ type MessageInfo struct {
msgHandler MessageHandler
}
type MessageHandler func(clientId uint64, msg proto.Message)
type ConnectHandler func(clientId uint64)
type UnknownMessageHandler func(clientId uint64, msg []byte)
type MessageHandler func(clientId string, msg proto.Message)
type ConnectHandler func(clientId string)
type UnknownMessageHandler func(clientId string, msg []byte)
const MsgTypeSize = 2
@@ -26,7 +26,7 @@ type PBProcessor struct {
unknownMessageHandler UnknownMessageHandler
connectHandler ConnectHandler
disconnectHandler ConnectHandler
network.INetMempool
bytespool.IBytesMempool
}
type PBPackInfo struct {
@@ -37,7 +37,7 @@ type PBPackInfo struct {
func NewPBProcessor() *PBProcessor {
processor := &PBProcessor{mapMsg: map[uint16]MessageInfo{}}
processor.INetMempool = network.NewMemAreaPool()
processor.IBytesMempool = bytespool.NewMemAreaPool()
return processor
}
@@ -54,7 +54,7 @@ func (slf *PBPackInfo) GetMsg() proto.Message {
}
// must goroutine safe
func (pbProcessor *PBProcessor) MsgRoute(clientId uint64, msg interface{}) error {
func (pbProcessor *PBProcessor) MsgRoute(clientId string, msg interface{}) error {
pPackInfo := msg.(*PBPackInfo)
v, ok := pbProcessor.mapMsg[pPackInfo.typ]
if ok == false {
@@ -66,8 +66,13 @@ func (pbProcessor *PBProcessor) MsgRoute(clientId uint64, msg interface{}) error
}
// must goroutine safe
func (pbProcessor *PBProcessor) Unmarshal(clientId uint64, data []byte) (interface{}, error) {
defer pbProcessor.ReleaseByteSlice(data)
func (pbProcessor *PBProcessor) Unmarshal(clientId string, data []byte) (interface{}, error) {
defer pbProcessor.ReleaseBytes(data)
return pbProcessor.UnmarshalWithOutRelease(clientId, data)
}
// unmarshal but not release data
func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId string, data []byte) (interface{}, error) {
var msgType uint16
if pbProcessor.LittleEndian == true {
msgType = binary.LittleEndian.Uint16(data[:2])
@@ -90,7 +95,7 @@ func (pbProcessor *PBProcessor) Unmarshal(clientId uint64, data []byte) (interfa
}
// must goroutine safe
func (pbProcessor *PBProcessor) Marshal(clientId uint64, msg interface{}) ([]byte, error) {
func (pbProcessor *PBProcessor) Marshal(clientId string, msg interface{}) ([]byte, error) {
pMsg := msg.(*PBPackInfo)
var err error
@@ -128,16 +133,16 @@ func (pbProcessor *PBProcessor) MakeRawMsg(msgType uint16, msg []byte) *PBPackIn
return &PBPackInfo{typ: msgType, rawMsg: msg}
}
func (pbProcessor *PBProcessor) UnknownMsgRoute(clientId uint64, msg interface{}) {
func (pbProcessor *PBProcessor) UnknownMsgRoute(clientId string, msg interface{}) {
pbProcessor.unknownMessageHandler(clientId, msg.([]byte))
}
// connect event
func (pbProcessor *PBProcessor) ConnectedRoute(clientId uint64) {
func (pbProcessor *PBProcessor) ConnectedRoute(clientId string) {
pbProcessor.connectHandler(clientId)
}
func (pbProcessor *PBProcessor) DisConnectedRoute(clientId uint64) {
func (pbProcessor *PBProcessor) DisConnectedRoute(clientId string) {
pbProcessor.disconnectHandler(clientId)
}
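
A hedged sketch of framing an already-encoded payload with the PB processor, assuming Marshal accepts the raw-packed form produced by MakeRawMsg (as the PBPackInfo fields suggest) and the usual v2 layout for the import path. It also contrasts the two unmarshal variants added above.

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/network/processor" // assumed path
)

func main() {
	p := processor.NewPBProcessor()

	// Wrap an already-encoded payload under message type 1 and frame it for the wire.
	pack := p.MakeRawMsg(1, []byte("payload"))
	data, err := p.Marshal("client-1", pack) // clientId is a string after this change
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	fmt.Printf("framed %d bytes\n", len(data))

	// On the receive side, Unmarshal returns the buffer to the bytes pool when done,
	// while the new UnmarshalWithOutRelease leaves ownership with the caller.
}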


@@ -10,9 +10,9 @@ type RawMessageInfo struct {
msgHandler RawMessageHandler
}
type RawMessageHandler func(clientId uint64,packType uint16,msg []byte)
type RawConnectHandler func(clientId uint64)
type UnknownRawMessageHandler func(clientId uint64,msg []byte)
type RawMessageHandler func(clientId string,packType uint16,msg []byte)
type RawConnectHandler func(clientId string)
type UnknownRawMessageHandler func(clientId string,msg []byte)
const RawMsgTypeSize = 2
type PBRawProcessor struct {
@@ -38,14 +38,14 @@ func (pbRawProcessor *PBRawProcessor) SetByteOrder(littleEndian bool) {
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId uint64, msg interface{}) error{
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId string, msg interface{}) error{
pPackInfo := msg.(*PBRawPackInfo)
pbRawProcessor.msgHandler(clientId,pPackInfo.typ,pPackInfo.rawMsg)
return nil
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId uint64,data []byte) (interface{}, error) {
func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId string,data []byte) (interface{}, error) {
var msgType uint16
if pbRawProcessor.LittleEndian == true {
msgType = binary.LittleEndian.Uint16(data[:2])
@@ -57,7 +57,7 @@ func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId uint64,data []byte) (i
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) Marshal(clientId uint64,msg interface{}) ([]byte, error){
func (pbRawProcessor *PBRawProcessor ) Marshal(clientId string,msg interface{}) ([]byte, error){
pMsg := msg.(*PBRawPackInfo)
buff := make([]byte, 2, len(pMsg.rawMsg)+RawMsgTypeSize)
@@ -78,10 +78,9 @@ func (pbRawProcessor *PBRawProcessor) SetRawMsgHandler(handle RawMessageHandler)
func (pbRawProcessor *PBRawProcessor) MakeRawMsg(msgType uint16,msg []byte,pbRawPackInfo *PBRawPackInfo) {
pbRawPackInfo.typ = msgType
pbRawPackInfo.rawMsg = msg
//return &PBRawPackInfo{typ:msgType,rawMsg:msg}
}
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId uint64,msg interface{}){
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId string,msg interface{}){
if pbRawProcessor.unknownMessageHandler == nil {
return
}
@@ -89,11 +88,11 @@ func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId uint64,msg interf
}
// connect event
func (pbRawProcessor *PBRawProcessor) ConnectedRoute(clientId uint64){
func (pbRawProcessor *PBRawProcessor) ConnectedRoute(clientId string){
pbRawProcessor.connectHandler(clientId)
}
func (pbRawProcessor *PBRawProcessor) DisConnectedRoute(clientId uint64){
func (pbRawProcessor *PBRawProcessor) DisConnectedRoute(clientId string){
pbRawProcessor.disconnectHandler(clientId)
}
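
A signature-only sketch of the raw processor's new string-keyed handlers. The zero value is used purely to show the callback shapes; real code should use the package's own constructor, which is not visible in these hunks, and the import paths are assumed as above.

package main

import (
	"github.com/duanhf2012/origin/v2/log"
	"github.com/duanhf2012/origin/v2/network/processor" // assumed path
)

func main() {
	var raw processor.PBRawProcessor // zero value, for illustration only
	raw.SetByteOrder(true)
	raw.SetRawMsgHandler(func(clientId string, packType uint16, msg []byte) {
		// Route by packType; clientId is a string network-wide after this change.
		log.Debug("raw packet", log.String("clientId", clientId), log.Int("type", int(packType)))
	})
}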


@@ -3,31 +3,25 @@ package processor
type IProcessor interface {
// must goroutine safe
MsgRoute(clientId uint64,msg interface{}) error
MsgRoute(clientId string,msg interface{}) error
//must goroutine safe
UnknownMsgRoute(clientId uint64,msg interface{})
UnknownMsgRoute(clientId string,msg interface{})
// connect event
ConnectedRoute(clientId uint64)
DisConnectedRoute(clientId uint64)
ConnectedRoute(clientId string)
DisConnectedRoute(clientId string)
// must goroutine safe
Unmarshal(clientId uint64,data []byte) (interface{}, error)
Unmarshal(clientId string,data []byte) (interface{}, error)
// must goroutine safe
Marshal(clientId uint64,msg interface{}) ([]byte, error)
Marshal(clientId string,msg interface{}) ([]byte, error)
}
type IRawProcessor interface {
SetByteOrder(littleEndian bool)
MsgRoute(clientId uint64,msg interface{}) error
Unmarshal(clientId uint64,data []byte) (interface{}, error)
Marshal(clientId uint64,msg interface{}) ([]byte, error)
IProcessor
SetByteOrder(littleEndian bool)
SetRawMsgHandler(handle RawMessageHandler)
MakeRawMsg(msgType uint16,msg []byte,pbRawPackInfo *PBRawPackInfo)
UnknownMsgRoute(clientId uint64,msg interface{})
ConnectedRoute(clientId uint64)
DisConnectedRoute(clientId uint64)
SetUnknownMsgHandler(unknownMessageHandler UnknownRawMessageHandler)
SetConnectedHandler(connectHandler RawConnectHandler)
SetDisConnectedHandler(disconnectHandler RawConnectHandler)
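
A compile-time sketch of what this interface trim means in practice: IRawProcessor now embeds IProcessor instead of redeclaring its methods, and both concrete processors keep satisfying the string-keyed IProcessor. The import path is assumed; the assertions only use methods shown in the hunks above.

package main

import "github.com/duanhf2012/origin/v2/network/processor" // assumed path

// Compile-time assertions that the concrete processors satisfy the string-keyed IProcessor.
var (
	_ processor.IProcessor = (*processor.JsonProcessor)(nil)
	_ processor.IProcessor = (*processor.PBProcessor)(nil)
)

func main() {}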


@@ -1,7 +1,7 @@
package network
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"net"
"sync"
"time"
@@ -13,6 +13,8 @@ type TCPClient struct {
ConnNum int
ConnectInterval time.Duration
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
AutoReconnect bool
NewAgent func(*TCPConn) Agent
cons ConnSet
@@ -20,11 +22,7 @@ type TCPClient struct {
closeFlag bool
// msg parser
LenMsgLen int
MinMsgLen uint32
MaxMsgLen uint32
LittleEndian bool
msgParser *MsgParser
MsgParser
}
func (client *TCPClient) Start() {
@@ -42,31 +40,56 @@ func (client *TCPClient) init() {
if client.ConnNum <= 0 {
client.ConnNum = 1
log.SRelease("invalid ConnNum, reset to ", client.ConnNum)
log.Info("invalid ConnNum",log.Int("reset", client.ConnNum))
}
if client.ConnectInterval <= 0 {
client.ConnectInterval = 3 * time.Second
log.SRelease("invalid ConnectInterval, reset to ", client.ConnectInterval)
log.Info("invalid ConnectInterval",log.Duration("reset", client.ConnectInterval))
}
if client.PendingWriteNum <= 0 {
client.PendingWriteNum = 1000
log.SRelease("invalid PendingWriteNum, reset to ", client.PendingWriteNum)
log.Info("invalid PendingWriteNum",log.Int("reset",client.PendingWriteNum))
}
if client.ReadDeadline == 0 {
client.ReadDeadline = 15*time.Second
log.Info("invalid ReadDeadline",log.Int64("reset", int64(client.ReadDeadline.Seconds())))
}
if client.WriteDeadline == 0 {
client.WriteDeadline = 15*time.Second
log.Info("invalid WriteDeadline",log.Int64("reset", int64(client.WriteDeadline.Seconds())))
}
if client.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
log.Fatal("NewAgent must not be nil")
}
if client.cons != nil {
log.SFatal("client is running")
log.Fatal("client is running")
}
if client.MinMsgLen == 0 {
client.MinMsgLen = Default_MinMsgLen
}
if client.MaxMsgLen == 0 {
client.MaxMsgLen = Default_MaxMsgLen
}
if client.LenMsgLen ==0 {
client.LenMsgLen = Default_LenMsgLen
}
maxMsgLen := client.MsgParser.getMaxMsgLen(client.LenMsgLen)
if client.MaxMsgLen > maxMsgLen {
client.MaxMsgLen = maxMsgLen
log.Info("invalid MaxMsgLen",log.Uint32("reset", maxMsgLen))
}
client.cons = make(ConnSet)
client.closeFlag = false
client.MsgParser.init()
}
// msg parser
msgParser := NewMsgParser()
msgParser.SetMsgLen(client.LenMsgLen, client.MinMsgLen, client.MaxMsgLen)
msgParser.SetByteOrder(client.LittleEndian)
client.msgParser = msgParser
func (client *TCPClient) GetCloseFlag() bool{
client.Lock()
defer client.Unlock()
return client.closeFlag
}
func (client *TCPClient) dial() net.Conn {
@@ -79,7 +102,7 @@ func (client *TCPClient) dial() net.Conn {
return conn
}
log.SWarning("connect to ",client.Addr," error:", err.Error())
log.Warning("connect error ",log.String("error",err.Error()), log.String("Addr",client.Addr))
time.Sleep(client.ConnectInterval)
continue
}
@@ -93,7 +116,7 @@ reconnect:
if conn == nil {
return
}
client.Lock()
if client.closeFlag {
client.Unlock()
@@ -103,7 +126,7 @@ reconnect:
client.cons[conn] = struct{}{}
client.Unlock()
tcpConn := newTCPConn(conn, client.PendingWriteNum, client.msgParser)
tcpConn := newTCPConn(conn, client.PendingWriteNum, &client.MsgParser,client.WriteDeadline)
agent := client.NewAgent(tcpConn)
agent.Run()
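
A hedged configuration sketch for the refactored client: framing options are now promoted from the embedded MsgParser, and the read/write deadlines are new fields with defaults applied in init(). The network import path and the Agent shape (Run and OnClose, as called in these hunks) are assumptions; echoAgent is hypothetical.

package main

import (
	"time"

	"github.com/duanhf2012/origin/v2/network" // assumed path; the hunks only show "package network"
)

// echoAgent is a hypothetical Agent; the hunks call Run() and OnClose() on agents.
type echoAgent struct{ conn *network.TCPConn }

func (a *echoAgent) Run() {
	for {
		data, err := a.conn.ReadMsg()
		if err != nil {
			return
		}
		a.conn.WriteMsg(data)       // WriteMsg copies into a fresh pooled buffer
		a.conn.ReleaseReadMsg(data) // hand the read buffer back to the bytes pool
	}
}

func (a *echoAgent) OnClose() {}

func main() {
	client := &network.TCPClient{
		Addr:            "127.0.0.1:9000",
		AutoReconnect:   true,
		ReadDeadline:    15 * time.Second, // new fields; zero values fall back to the defaults in init()
		WriteDeadline:   15 * time.Second,
		PendingWriteNum: 1000,
		NewAgent:        func(conn *network.TCPConn) network.Agent { return &echoAgent{conn: conn} },
	}
	// Framing options are promoted from the embedded MsgParser.
	client.LenMsgLen = 2
	client.MaxMsgLen = 65535
	client.LittleEndian = false

	client.Start()
	select {} // keep the sketch process alive
}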


@@ -1,11 +1,12 @@
package network
import (
"github.com/duanhf2012/origin/log"
"errors"
"github.com/duanhf2012/origin/v2/log"
"net"
"sync"
"sync/atomic"
"time"
"errors"
)
type ConnSet map[net.Conn]struct{}
@@ -14,7 +15,7 @@ type TCPConn struct {
sync.Mutex
conn net.Conn
writeChan chan []byte
closeFlag bool
closeFlag int32
msgParser *MsgParser
}
@@ -27,7 +28,7 @@ func freeChannel(conn *TCPConn){
}
}
func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser) *TCPConn {
func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser,writeDeadline time.Duration) *TCPConn {
tcpConn := new(TCPConn)
tcpConn.conn = conn
tcpConn.writeChan = make(chan []byte, pendingWriteNum)
@@ -37,8 +38,10 @@ func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser) *TCPCo
if b == nil {
break
}
conn.SetWriteDeadline(time.Now().Add(writeDeadline))
_, err := conn.Write(b)
tcpConn.msgParser.ReleaseByteSlice(b)
tcpConn.msgParser.ReleaseBytes(b)
if err != nil {
break
@@ -47,7 +50,7 @@ func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser) *TCPCo
conn.Close()
tcpConn.Lock()
freeChannel(tcpConn)
tcpConn.closeFlag = true
atomic.StoreInt32(&tcpConn.closeFlag,1)
tcpConn.Unlock()
}()
@@ -58,9 +61,9 @@ func (tcpConn *TCPConn) doDestroy() {
tcpConn.conn.(*net.TCPConn).SetLinger(0)
tcpConn.conn.Close()
if !tcpConn.closeFlag {
if atomic.LoadInt32(&tcpConn.closeFlag)==0 {
close(tcpConn.writeChan)
tcpConn.closeFlag = true
atomic.StoreInt32(&tcpConn.closeFlag,1)
}
}
@@ -74,12 +77,12 @@ func (tcpConn *TCPConn) Destroy() {
func (tcpConn *TCPConn) Close() {
tcpConn.Lock()
defer tcpConn.Unlock()
if tcpConn.closeFlag {
if atomic.LoadInt32(&tcpConn.closeFlag)==1 {
return
}
tcpConn.doWrite(nil)
tcpConn.closeFlag = true
atomic.StoreInt32(&tcpConn.closeFlag,1)
}
func (tcpConn *TCPConn) GetRemoteIp() string {
@@ -89,7 +92,7 @@ func (tcpConn *TCPConn) GetRemoteIp() string {
func (tcpConn *TCPConn) doWrite(b []byte) error{
if len(tcpConn.writeChan) == cap(tcpConn.writeChan) {
tcpConn.ReleaseReadMsg(b)
log.SError("close conn: channel full")
log.Error("close conn: channel full")
tcpConn.doDestroy()
return errors.New("close conn: channel full")
}
@@ -102,7 +105,7 @@ func (tcpConn *TCPConn) doWrite(b []byte) error{
func (tcpConn *TCPConn) Write(b []byte) error{
tcpConn.Lock()
defer tcpConn.Unlock()
if tcpConn.closeFlag || b == nil {
if atomic.LoadInt32(&tcpConn.closeFlag)==1 || b == nil {
tcpConn.ReleaseReadMsg(b)
return errors.New("conn is close")
}
@@ -127,18 +130,18 @@ func (tcpConn *TCPConn) ReadMsg() ([]byte, error) {
}
func (tcpConn *TCPConn) ReleaseReadMsg(byteBuff []byte){
tcpConn.msgParser.ReleaseByteSlice(byteBuff)
tcpConn.msgParser.ReleaseBytes(byteBuff)
}
func (tcpConn *TCPConn) WriteMsg(args ...[]byte) error {
if tcpConn.closeFlag == true {
if atomic.LoadInt32(&tcpConn.closeFlag) == 1 {
return errors.New("conn is close")
}
return tcpConn.msgParser.Write(tcpConn, args...)
}
func (tcpConn *TCPConn) WriteRawMsg(args []byte) error {
if tcpConn.closeFlag == true {
if atomic.LoadInt32(&tcpConn.closeFlag) == 1 {
return errors.New("conn is close")
}
@@ -147,7 +150,7 @@ func (tcpConn *TCPConn) WriteRawMsg(args []byte) error {
func (tcpConn *TCPConn) IsConnected() bool {
return tcpConn.closeFlag == false
return atomic.LoadInt32(&tcpConn.closeFlag) == 0
}
func (tcpConn *TCPConn) SetReadDeadline(d time.Duration) {
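
The close flag moves from a mutex-guarded bool to an int32 driven by sync/atomic, so IsConnected no longer needs the connection lock. A self-contained sketch of that idiom (generic code, not the framework's):

package main

import (
	"fmt"
	"sync/atomic"
)

// closer mirrors the int32 close-flag idiom TCPConn adopts above:
// writers use atomic.StoreInt32, readers use atomic.LoadInt32.
type closer struct{ closeFlag int32 }

func (c *closer) markClosed()       { atomic.StoreInt32(&c.closeFlag, 1) }
func (c *closer) IsConnected() bool { return atomic.LoadInt32(&c.closeFlag) == 0 }

func main() {
	var c closer
	fmt.Println(c.IsConnected()) // true
	c.markClosed()
	fmt.Println(c.IsConnected()) // false
}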


@@ -3,6 +3,7 @@ package network
import (
"encoding/binary"
"errors"
"github.com/duanhf2012/origin/v2/util/bytespool"
"io"
"math"
)
@@ -11,62 +12,36 @@ import (
// | len | data |
// --------------
type MsgParser struct {
lenMsgLen int
minMsgLen uint32
maxMsgLen uint32
littleEndian bool
LenMsgLen int
MinMsgLen uint32
MaxMsgLen uint32
LittleEndian bool
INetMempool
bytespool.IBytesMempool
}
func NewMsgParser() *MsgParser {
p := new(MsgParser)
p.lenMsgLen = 2
p.minMsgLen = 1
p.maxMsgLen = 4096
p.littleEndian = false
p.INetMempool = NewMemAreaPool()
return p
}
// It's dangerous to call the method on reading or writing
func (p *MsgParser) SetMsgLen(lenMsgLen int, minMsgLen uint32, maxMsgLen uint32) {
if lenMsgLen == 1 || lenMsgLen == 2 || lenMsgLen == 4 {
p.lenMsgLen = lenMsgLen
}
if minMsgLen != 0 {
p.minMsgLen = minMsgLen
}
if maxMsgLen != 0 {
p.maxMsgLen = maxMsgLen
}
var max uint32
switch p.lenMsgLen {
func (p *MsgParser) getMaxMsgLen(lenMsgLen int) uint32 {
switch p.LenMsgLen {
case 1:
max = math.MaxUint8
return math.MaxUint8
case 2:
max = math.MaxUint16
return math.MaxUint16
case 4:
max = math.MaxUint32
}
if p.minMsgLen > max {
p.minMsgLen = max
}
if p.maxMsgLen > max {
p.maxMsgLen = max
return math.MaxUint32
default:
panic("LenMsgLen value must be 1 or 2 or 4")
}
}
// It's dangerous to call the method on reading or writing
func (p *MsgParser) SetByteOrder(littleEndian bool) {
p.littleEndian = littleEndian
func (p *MsgParser) init(){
p.IBytesMempool = bytespool.NewMemAreaPool()
}
// goroutine safe
func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {
var b [4]byte
bufMsgLen := b[:p.lenMsgLen]
bufMsgLen := b[:p.LenMsgLen]
// read len
if _, err := io.ReadFull(conn, bufMsgLen); err != nil {
@@ -75,17 +50,17 @@ func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {
// parse len
var msgLen uint32
switch p.lenMsgLen {
switch p.LenMsgLen {
case 1:
msgLen = uint32(bufMsgLen[0])
case 2:
if p.littleEndian {
if p.LittleEndian {
msgLen = uint32(binary.LittleEndian.Uint16(bufMsgLen))
} else {
msgLen = uint32(binary.BigEndian.Uint16(bufMsgLen))
}
case 4:
if p.littleEndian {
if p.LittleEndian {
msgLen = binary.LittleEndian.Uint32(bufMsgLen)
} else {
msgLen = binary.BigEndian.Uint32(bufMsgLen)
@@ -93,16 +68,16 @@ func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {
}
// check len
if msgLen > p.maxMsgLen {
if msgLen > p.MaxMsgLen {
return nil, errors.New("message too long")
} else if msgLen < p.minMsgLen {
} else if msgLen < p.MinMsgLen {
return nil, errors.New("message too short")
}
// data
msgData := p.MakeByteSlice(int(msgLen))
msgData := p.MakeBytes(int(msgLen))
if _, err := io.ReadFull(conn, msgData[:msgLen]); err != nil {
p.ReleaseByteSlice(msgData)
p.ReleaseBytes(msgData)
return nil, err
}
@@ -118,26 +93,26 @@ func (p *MsgParser) Write(conn *TCPConn, args ...[]byte) error {
}
// check len
if msgLen > p.maxMsgLen {
if msgLen > p.MaxMsgLen {
return errors.New("message too long")
} else if msgLen < p.minMsgLen {
} else if msgLen < p.MinMsgLen {
return errors.New("message too short")
}
//msg := make([]byte, uint32(p.lenMsgLen)+msgLen)
msg := p.MakeByteSlice(p.lenMsgLen+int(msgLen))
msg := p.MakeBytes(p.LenMsgLen+int(msgLen))
// write len
switch p.lenMsgLen {
switch p.LenMsgLen {
case 1:
msg[0] = byte(msgLen)
case 2:
if p.littleEndian {
if p.LittleEndian {
binary.LittleEndian.PutUint16(msg, uint16(msgLen))
} else {
binary.BigEndian.PutUint16(msg, uint16(msgLen))
}
case 4:
if p.littleEndian {
if p.LittleEndian {
binary.LittleEndian.PutUint32(msg, msgLen)
} else {
binary.BigEndian.PutUint32(msg, msgLen)
@@ -145,7 +120,7 @@ func (p *MsgParser) Write(conn *TCPConn, args ...[]byte) error {
}
// write data
l := p.lenMsgLen
l := p.LenMsgLen
for i := 0; i < len(args); i++ {
copy(msg[l:], args[i])
l += len(args[i])
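
The parser's wire format stays a length header (1, 2 or 4 bytes, clamped by getMaxMsgLen) followed by the payload. A standalone sketch of framing one message with a 2-byte big-endian header, matching the parser's default configuration; only the standard library is used.

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends a 2-byte big-endian length header; the header counts
// only the payload bytes, exactly as MsgParser.Write does above.
func frame(payload []byte) []byte {
	buf := make([]byte, 2+len(payload))
	binary.BigEndian.PutUint16(buf, uint16(len(payload)))
	copy(buf[2:], payload)
	return buf
}

func main() {
	fmt.Printf("% x\n", frame([]byte("ping"))) // 00 04 70 69 6e 67
}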


@@ -1,16 +1,33 @@
package network
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/bytespool"
"net"
"sync"
"time"
"fmt"
"errors"
)
const(
Default_ReadDeadline = time.Second*30 //default read deadline: 30s
Default_WriteDeadline = time.Second*30 //default write deadline: 30s
Default_MaxConnNum = 1000000 //default maximum number of connections
Default_PendingWriteNum = 100000 //write-message channel capacity per connection
Default_LittleEndian = false //default byte order (big-endian)
Default_MinMsgLen = 2 //minimum message length: 2 bytes
Default_LenMsgLen = 2 //the length header field occupies 2 bytes
Default_MaxMsgLen = 65535 //maximum message length
)
type TCPServer struct {
Addr string
MaxConnNum int
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
NewAgent func(*TCPConn) Agent
ln net.Listener
conns ConnSet
@@ -18,58 +35,83 @@ type TCPServer struct {
wgLn sync.WaitGroup
wgConns sync.WaitGroup
// msg parser
LenMsgLen int
MinMsgLen uint32
MaxMsgLen uint32
LittleEndian bool
msgParser *MsgParser
netMemPool INetMempool
MsgParser
}
func (server *TCPServer) Start() {
server.init()
func (server *TCPServer) Start() error{
err := server.init()
if err != nil {
return err
}
go server.run()
return nil
}
func (server *TCPServer) init() {
func (server *TCPServer) init() error{
ln, err := net.Listen("tcp", server.Addr)
if err != nil {
log.SFatal("Listen tcp error:", err.Error())
return fmt.Errorf("Listen tcp fail,error:%s",err.Error())
}
if server.MaxConnNum <= 0 {
server.MaxConnNum = 100
log.SRelease("invalid MaxConnNum, reset to ", server.MaxConnNum)
server.MaxConnNum = Default_MaxConnNum
log.Info("invalid MaxConnNum",log.Int("reset", server.MaxConnNum))
}
if server.PendingWriteNum <= 0 {
server.PendingWriteNum = 100
log.SRelease("invalid PendingWriteNum, reset to ", server.PendingWriteNum)
server.PendingWriteNum = Default_PendingWriteNum
log.Info("invalid PendingWriteNum",log.Int("reset", server.PendingWriteNum))
}
if server.LenMsgLen <= 0 {
server.LenMsgLen = Default_LenMsgLen
log.Info("invalid LenMsgLen", log.Int("reset", server.LenMsgLen))
}
if server.MaxMsgLen <= 0 {
server.MaxMsgLen = Default_MaxMsgLen
log.Info("invalid MaxMsgLen", log.Uint32("reset to", server.MaxMsgLen))
}
maxMsgLen := server.MsgParser.getMaxMsgLen(server.LenMsgLen)
if server.MaxMsgLen > maxMsgLen {
server.MaxMsgLen = maxMsgLen
log.Info("invalid MaxMsgLen",log.Uint32("reset", maxMsgLen))
}
if server.MinMsgLen <= 0 {
server.MinMsgLen = Default_MinMsgLen
log.Info("invalid MinMsgLen",log.Uint32("reset", server.MinMsgLen))
}
if server.WriteDeadline == 0 {
server.WriteDeadline = Default_WriteDeadline
log.Info("invalid WriteDeadline",log.Int64("reset",int64(server.WriteDeadline.Seconds())))
}
if server.ReadDeadline == 0 {
server.ReadDeadline = Default_ReadDeadline
log.Info("invalid ReadDeadline",log.Int64("reset", int64(server.ReadDeadline.Seconds())))
}
if server.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
return errors.New("NewAgent must not be nil")
}
server.ln = ln
server.conns = make(ConnSet)
// msg parser
msgParser := NewMsgParser()
if msgParser.INetMempool == nil {
msgParser.INetMempool = NewMemAreaPool()
}
msgParser.SetMsgLen(server.LenMsgLen, server.MinMsgLen, server.MaxMsgLen)
msgParser.SetByteOrder(server.LittleEndian)
server.msgParser = msgParser
server.MsgParser.init()
return nil
}
func (server *TCPServer) SetNetMempool(mempool INetMempool){
server.msgParser.INetMempool = mempool
func (server *TCPServer) SetNetMempool(mempool bytespool.IBytesMempool){
server.IBytesMempool = mempool
}
func (server *TCPServer) GetNetMempool() INetMempool{
return server.msgParser.INetMempool
func (server *TCPServer) GetNetMempool() bytespool.IBytesMempool {
return server.IBytesMempool
}
func (server *TCPServer) run() {
@@ -89,12 +131,13 @@ func (server *TCPServer) run() {
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
log.SRelease("accept error:",err.Error(),"; retrying in ", tempDelay)
log.Info("accept fail",log.String("error",err.Error()),log.Duration("sleep time", tempDelay))
time.Sleep(tempDelay)
continue
}
return
}
conn.(*net.TCPConn).SetNoDelay(true)
tempDelay = 0
@@ -102,19 +145,19 @@ func (server *TCPServer) run() {
if len(server.conns) >= server.MaxConnNum {
server.mutexConns.Unlock()
conn.Close()
log.SWarning("too many connections")
log.Warning("too many connections")
continue
}
server.conns[conn] = struct{}{}
server.mutexConns.Unlock()
server.wgConns.Add(1)
tcpConn := newTCPConn(conn, server.PendingWriteNum, server.msgParser)
tcpConn := newTCPConn(conn, server.PendingWriteNum, &server.MsgParser,server.WriteDeadline)
agent := server.NewAgent(tcpConn)
go func() {
agent.Run()
// cleanup
tcpConn.Close()
server.mutexConns.Lock()
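
Start now returns an error instead of aborting through log.Fatal, and unset options fall back to the Default_* constants declared above. A hedged startup sketch; the import paths and the Agent shape are assumed as in the TCPClient sketch earlier, and dropAgent is hypothetical.

package main

import (
	"github.com/duanhf2012/origin/v2/log"
	"github.com/duanhf2012/origin/v2/network" // assumed path
)

// dropAgent is a hypothetical Agent that discards everything it reads.
type dropAgent struct{ conn *network.TCPConn }

func (a *dropAgent) Run() {
	for {
		data, err := a.conn.ReadMsg()
		if err != nil {
			return
		}
		a.conn.ReleaseReadMsg(data)
	}
}

func (a *dropAgent) OnClose() {}

func main() {
	server := &network.TCPServer{
		Addr:     ":9100",
		NewAgent: func(conn *network.TCPConn) network.Agent { return &dropAgent{conn: conn} },
		// MaxConnNum, PendingWriteNum, deadlines and framing fields are left zero,
		// so init() applies the Default_* constants above.
	}
	if err := server.Start(); err != nil { // Start reports failure instead of calling log.Fatal itself
		log.Fatal("tcp server start fail", log.String("error", err.Error()))
	}
	select {}
}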


@@ -1,7 +1,7 @@
package network
import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/gorilla/websocket"
"sync"
"time"
@@ -14,6 +14,7 @@ type WSClient struct {
ConnectInterval time.Duration
PendingWriteNum int
MaxMsgLen uint32
MessageType int
HandshakeTimeout time.Duration
AutoReconnect bool
NewAgent func(*WSConn) Agent
@@ -21,6 +22,7 @@ type WSClient struct {
cons WebsocketConnSet
wg sync.WaitGroup
closeFlag bool
}
func (client *WSClient) Start() {
@@ -38,29 +40,33 @@ func (client *WSClient) init() {
if client.ConnNum <= 0 {
client.ConnNum = 1
log.SRelease("invalid ConnNum, reset to ", client.ConnNum)
log.Info("invalid ConnNum",log.Int("reset", client.ConnNum))
}
if client.ConnectInterval <= 0 {
client.ConnectInterval = 3 * time.Second
log.SRelease("invalid ConnectInterval, reset to ", client.ConnectInterval)
log.Info("invalid ConnectInterval",log.Duration("reset", client.ConnectInterval))
}
if client.PendingWriteNum <= 0 {
client.PendingWriteNum = 100
log.SRelease("invalid PendingWriteNum, reset to ", client.PendingWriteNum)
log.Info("invalid PendingWriteNum",log.Int("reset", client.PendingWriteNum))
}
if client.MaxMsgLen <= 0 {
client.MaxMsgLen = 4096
log.SRelease("invalid MaxMsgLen, reset to ", client.MaxMsgLen)
log.Info("invalid MaxMsgLen",log.Uint32("reset", client.MaxMsgLen))
}
if client.HandshakeTimeout <= 0 {
client.HandshakeTimeout = 10 * time.Second
log.SRelease("invalid HandshakeTimeout, reset to ", client.HandshakeTimeout)
log.Info("invalid HandshakeTimeout",log.Duration("reset", client.HandshakeTimeout))
}
if client.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
log.Fatal("NewAgent must not be nil")
}
if client.cons != nil {
log.SFatal("client is running")
log.Fatal("client is running")
}
if client.MessageType == 0 {
client.MessageType = websocket.TextMessage
}
client.cons = make(WebsocketConnSet)
@@ -77,7 +83,7 @@ func (client *WSClient) dial() *websocket.Conn {
return conn
}
log.SRelease("connect to ", client.Addr," error: ", err.Error())
log.Info("connect fail", log.String("error",err.Error()),log.String("addr",client.Addr))
time.Sleep(client.ConnectInterval)
continue
}
@@ -102,7 +108,7 @@ reconnect:
client.cons[conn] = struct{}{}
client.Unlock()
wsConn := newWSConn(conn, client.PendingWriteNum, client.MaxMsgLen)
wsConn := newWSConn(conn, client.PendingWriteNum, client.MaxMsgLen,client.MessageType)
agent := client.NewAgent(wsConn)
agent.Run()


@@ -2,7 +2,7 @@ package network
import (
"errors"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/gorilla/websocket"
"net"
"sync"
@@ -18,7 +18,7 @@ type WSConn struct {
closeFlag bool
}
func newWSConn(conn *websocket.Conn, pendingWriteNum int, maxMsgLen uint32) *WSConn {
func newWSConn(conn *websocket.Conn, pendingWriteNum int, maxMsgLen uint32,messageType int) *WSConn {
wsConn := new(WSConn)
wsConn.conn = conn
wsConn.writeChan = make(chan []byte, pendingWriteNum)
@@ -30,7 +30,7 @@ func newWSConn(conn *websocket.Conn, pendingWriteNum int, maxMsgLen uint32) *WSC
break
}
err := conn.WriteMessage(websocket.BinaryMessage, b)
err := conn.WriteMessage(messageType, b)
if err != nil {
break
}
@@ -75,7 +75,7 @@ func (wsConn *WSConn) Close() {
func (wsConn *WSConn) doWrite(b []byte) {
if len(wsConn.writeChan) == cap(wsConn.writeChan) {
log.SDebug("close conn: channel full")
log.Debug("close conn: channel full")
wsConn.doDestroy()
return
}


@@ -2,7 +2,7 @@ package network
import (
"crypto/tls"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/gorilla/websocket"
"net"
"net/http"
@@ -21,6 +21,7 @@ type WSServer struct {
NewAgent func(*WSConn) Agent
ln net.Listener
handler *WSHandler
messageType int
}
type WSHandler struct {
@@ -32,6 +33,11 @@ type WSHandler struct {
conns WebsocketConnSet
mutexConns sync.Mutex
wg sync.WaitGroup
messageType int
}
func (handler *WSHandler) SetMessageType(messageType int) {
handler.messageType = messageType
}
func (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -41,10 +47,13 @@ func (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
conn, err := handler.upgrader.Upgrade(w, r, nil)
if err != nil {
log.SError("upgrade error: ", err.Error())
log.Error("upgrade fail",log.String("error",err.Error()))
return
}
conn.SetReadLimit(int64(handler.maxMsgLen))
if handler.messageType == 0 {
handler.messageType = websocket.TextMessage
}
handler.wg.Add(1)
defer handler.wg.Done()
@@ -58,13 +67,13 @@ func (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if len(handler.conns) >= handler.maxConnNum {
handler.mutexConns.Unlock()
conn.Close()
log.SWarning("too many connections")
log.Warning("too many connections")
return
}
handler.conns[conn] = struct{}{}
handler.mutexConns.Unlock()
wsConn := newWSConn(conn, handler.pendingWriteNum, handler.maxMsgLen)
wsConn := newWSConn(conn, handler.pendingWriteNum, handler.maxMsgLen, handler.messageType)
agent := handler.newAgent(wsConn)
agent.Run()
@@ -76,30 +85,37 @@ func (handler *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
agent.OnClose()
}
func (server *WSServer) SetMessageType(messageType int) {
server.messageType = messageType
if server.handler != nil {
server.handler.SetMessageType(messageType)
}
}
func (server *WSServer) Start() {
ln, err := net.Listen("tcp", server.Addr)
if err != nil {
log.SFatal("WSServer Listen fail:", err.Error())
log.Fatal("WSServer Listen fail",log.String("error", err.Error()))
}
if server.MaxConnNum <= 0 {
server.MaxConnNum = 100
log.SRelease("invalid MaxConnNum, reset to ", server.MaxConnNum)
log.Info("invalid MaxConnNum", log.Int("reset", server.MaxConnNum))
}
if server.PendingWriteNum <= 0 {
server.PendingWriteNum = 100
log.SRelease("invalid PendingWriteNum, reset to ", server.PendingWriteNum)
log.Info("invalid PendingWriteNum", log.Int("reset", server.PendingWriteNum))
}
if server.MaxMsgLen <= 0 {
server.MaxMsgLen = 4096
log.SRelease("invalid MaxMsgLen, reset to ", server.MaxMsgLen)
log.Info("invalid MaxMsgLen", log.Uint32("reset", server.MaxMsgLen))
}
if server.HTTPTimeout <= 0 {
server.HTTPTimeout = 10 * time.Second
log.SRelease("invalid HTTPTimeout, reset to ", server.HTTPTimeout)
log.Info("invalid HTTPTimeout", log.Duration("reset", server.HTTPTimeout))
}
if server.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
log.Fatal("NewAgent must not be nil")
}
if server.CertFile != "" || server.KeyFile != "" {
@@ -110,7 +126,7 @@ func (server *WSServer) Start() {
config.Certificates = make([]tls.Certificate, 1)
config.Certificates[0], err = tls.LoadX509KeyPair(server.CertFile, server.KeyFile)
if err != nil {
log.SFatal("LoadX509KeyPair fail:", err.Error())
log.Fatal("LoadX509KeyPair fail",log.String("error", err.Error()))
}
ln = tls.NewListener(ln, config)
@@ -123,6 +139,7 @@ func (server *WSServer) Start() {
maxMsgLen: server.MaxMsgLen,
newAgent: server.NewAgent,
conns: make(WebsocketConnSet),
messageType:server.messageType,
upgrader: websocket.Upgrader{
HandshakeTimeout: server.HTTPTimeout,
CheckOrigin: func(_ *http.Request) bool { return true },
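
The websocket server (and its handler) now carry a configurable message type that defaults to websocket.TextMessage when unset. A hedged sketch of switching to binary frames before Start; the network import path, the Agent shape and wsAgent itself are assumptions, as in the TCP sketches above.

package main

import (
	"github.com/duanhf2012/origin/v2/network" // assumed path
	"github.com/gorilla/websocket"
)

// wsAgent is a hypothetical Agent; a real one would read from a.conn in Run.
type wsAgent struct{ conn *network.WSConn }

func (a *wsAgent) Run()     {}
func (a *wsAgent) OnClose() {}

func main() {
	server := &network.WSServer{
		Addr:     ":9200",
		NewAgent: func(conn *network.WSConn) network.Agent { return &wsAgent{conn: conn} },
	}
	// Without this call the handler falls back to websocket.TextMessage.
	server.SetMessageType(websocket.BinaryMessage)
	server.Start()
	select {}
}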


@@ -3,15 +3,14 @@ package node
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/cluster"
"github.com/duanhf2012/origin/console"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/profiler"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/util/timer"
"github.com/duanhf2012/origin/util/buildtime"
"io/ioutil"
slog "log"
"github.com/duanhf2012/origin/v2/cluster"
"github.com/duanhf2012/origin/v2/console"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/profiler"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/buildtime"
"github.com/duanhf2012/origin/v2/util/timer"
"io"
"net/http"
_ "net/http/pprof"
"os"
@@ -20,44 +19,62 @@ import (
"strings"
"syscall"
"time"
"github.com/duanhf2012/origin/v2/util/sysprocess"
)
var closeSig chan bool
var sig chan os.Signal
var nodeId int
var nodeId string
var preSetupService []service.IService //pre-setup services
var preSetupTemplateService []func()service.IService
var profilerInterval time.Duration
var bValid bool
var configDir = "./config/"
var logLevel string = "debug"
var logPath string
const(
SingleStop syscall.Signal = 10
SignalRetire syscall.Signal = 12
)
type BuildOSType = int8
const(
Windows BuildOSType = 0
Linux BuildOSType = 1
Mac BuildOSType = 2
)
func init() {
sig = make(chan os.Signal, 4)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM, SingleStop,SignalRetire)
closeSig = make(chan bool,1)
sig = make(chan os.Signal, 3)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM,syscall.Signal(10))
console.RegisterCommandBool("help",false,"<-help> This help.",usage)
console.RegisterCommandString("name","","<-name nodeName> Node's name.",setName)
console.RegisterCommandString("start","","<-start nodeid=nodeid> Run originserver.",startNode)
console.RegisterCommandString("stop","","<-stop nodeid=nodeid> Stop originserver process.",stopNode)
console.RegisterCommandString("config","","<-config path> Configuration file path.",setConfigPath)
console.RegisterCommandBool("help", false, "<-help> This help.", usage)
console.RegisterCommandString("name", "", "<-name nodeName> Node's name.", setName)
console.RegisterCommandString("start", "", "<-start nodeid=nodeid> Run originserver.", startNode)
console.RegisterCommandString("stop", "", "<-stop nodeid=nodeid> Stop originserver process.", stopNode)
console.RegisterCommandString("retire", "", "<-retire nodeid=nodeid> retire originserver process.", retireNode)
console.RegisterCommandString("config", "", "<-config path> Configuration file path.", setConfigPath)
console.RegisterCommandString("console", "", "<-console true|false> Turn on or off screen log output.", openConsole)
console.RegisterCommandString("loglevel", "debug", "<-loglevel debug|release|warning|error|fatal> Set loglevel.", setLevel)
console.RegisterCommandString("logpath", "", "<-logpath path> Set log file path.", setLogPath)
console.RegisterCommandString("pprof","","<-pprof ip:port> Open performance analysis.",setPprof)
console.RegisterCommandInt("logsize", 0, "<-logsize size> Set log size(MB).", setLogSize)
console.RegisterCommandInt("logchannelcap", 0, "<-logchannelcap num> Set log channel cap.", setLogChannelCapNum)
console.RegisterCommandString("pprof", "", "<-pprof ip:port> Open performance analysis.", setPprof)
}
func usage(val interface{}) error{
func notifyAllServiceRetire(){
service.NotifyAllServiceRetire()
}
func usage(val interface{}) error {
ret := val.(bool)
if ret == false {
return nil
}
if len(buildtime.GetBuildDateTime())>0 {
fmt.Fprintf(os.Stderr, "Welcome to Origin(build info: %s)\nUsage: originserver [-help] [-start node=1] [-stop] [-config path] [-pprof 0.0.0.0:6060]...\n",buildtime.GetBuildDateTime())
}else{
if len(buildtime.GetBuildDateTime()) > 0 {
fmt.Fprintf(os.Stderr, "Welcome to Origin(build info: %s)\nUsage: originserver [-help] [-start node=1] [-stop] [-config path] [-pprof 0.0.0.0:6060]...\n", buildtime.GetBuildDateTime())
} else {
fmt.Fprintf(os.Stderr, "Welcome to Origin\nUsage: originserver [-help] [-start node=1] [-stop] [-config path] [-pprof 0.0.0.0:6060]...\n")
}
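
Node wires its CLI flags through the console package, as the init() hunk above shows. A hedged sketch of registering one extra string option with the same pattern from an application's main: the flag name is hypothetical, and the node/console import paths follow the v2 layout used elsewhere in this compare.

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/console"
	"github.com/duanhf2012/origin/v2/node"
)

func main() {
	// Hypothetical extra option, registered exactly like the built-in ones above.
	console.RegisterCommandString("tag", "", "<-tag value> Attach a custom tag to this node.", func(val interface{}) error {
		if tag := val.(string); tag != "" {
			fmt.Println("node tag:", tag)
		}
		return nil
	})
	node.Start() // parses os.Args and dispatches to the registered handlers
}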
@@ -71,28 +88,28 @@ func setName(val interface{}) error {
func setPprof(val interface{}) error {
listenAddr := val.(string)
if listenAddr==""{
if listenAddr == "" {
return nil
}
go func(){
go func() {
err := http.ListenAndServe(listenAddr, nil)
if err != nil {
panic(fmt.Errorf("%+v",err))
panic(fmt.Errorf("%+v", err))
}
}()
return nil
}
func setConfigPath(val interface{}) error{
func setConfigPath(val interface{}) error {
configPath := val.(string)
if configPath==""{
if configPath == "" {
return nil
}
_, err := os.Stat(configPath)
if err != nil {
return fmt.Errorf("Cannot find file path %s",configPath)
return fmt.Errorf("Cannot find file path %s", configPath)
}
cluster.SetConfigDir(configPath)
@@ -100,30 +117,30 @@ func setConfigPath(val interface{}) error{
return nil
}
func getRunProcessPid(nodeId int) (int,error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid",os.Args[0],nodeId), os.O_RDONLY, 0600)
func getRunProcessPid(nodeId string) (int, error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%s.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
defer f.Close()
if err!= nil {
return 0,err
if err != nil {
return 0, err
}
pidByte,errs := ioutil.ReadAll(f)
if errs!=nil {
return 0,errs
pidByte, errs := io.ReadAll(f)
if errs != nil {
return 0, errs
}
return strconv.Atoi(string(pidByte))
}
func writeProcessPid(nodeId int) {
func writeProcessPid(nodeId string) {
//pid
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid",os.Args[0],nodeId), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
f, err := os.OpenFile(fmt.Sprintf("%s_%s.pid", os.Args[0], nodeId), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
defer f.Close()
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
} else {
_,err=f.Write([]byte(fmt.Sprintf("%d",os.Getpid())))
_, err = f.Write([]byte(fmt.Sprintf("%d", os.Getpid())))
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
@@ -131,64 +148,128 @@ func writeProcessPid(nodeId int) {
}
}
func GetNodeId() int {
func GetNodeId() string {
return nodeId
}
func initNode(id int){
func initNode(id string) {
//1. initialize the cluster
nodeId = id
err := cluster.GetCluster().Init(GetNodeId(),Setup)
err := cluster.GetCluster().Init(GetNodeId(), Setup)
if err != nil {
log.SFatal("read system config is error ",err.Error())
log.Error("Init cluster fail",log.ErrorAttr("error",err))
os.Exit(1)
}
err = initLog()
if err != nil{
if err != nil {
return
}
//2.setup service
for _,s := range preSetupService {
//check whether the service is configured
if cluster.GetCluster().IsConfigService(s.GetName()) == false {
continue
//2. set up services in the configured order
serviceOrder := cluster.GetCluster().GetLocalNodeInfo().ServiceList
for _,serviceName:= range serviceOrder{
bSetup := false
//check whether a template service is configured
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
for _,newSer := range preSetupTemplateService {
ser := newSer()
ser.OnSetup(ser)
if ser.GetName() == templateServiceName {
ser.SetName(serviceName)
ser.Init(ser,cluster.GetRpcClient,cluster.GetRpcServer,cluster.GetCluster().GetServiceCfg(ser.GetName()))
service.Setup(ser)
bSetup = true
break
}
}
if bSetup == false{
log.Error("Template service not found",log.String("service name",serviceName),log.String("template service name",templateServiceName))
os.Exit(1)
}
}
pServiceCfg := cluster.GetCluster().GetServiceCfg(s.GetName())
s.Init(s,cluster.GetRpcClient,cluster.GetRpcServer,pServiceCfg)
for _, s := range preSetupService {
if s.GetName() != serviceName {
continue
}
bSetup = true
pServiceCfg := cluster.GetCluster().GetServiceCfg(s.GetName())
s.Init(s, cluster.GetRpcClient, cluster.GetRpcServer, pServiceCfg)
service.Setup(s)
service.Setup(s)
}
if bSetup == false {
log.Fatal("Service name "+serviceName+" configuration error")
}
}
//3.Initialize the services
service.Init(closeSig)
service.Init()
}
func initLog() error{
if logPath == ""{
func initLog() error {
if log.LogPath == "" {
setLogPath("./log")
}
localnodeinfo := cluster.GetCluster().GetLocalNodeInfo()
filepre := fmt.Sprintf("%s_%d_", localnodeinfo.NodeName, localnodeinfo.NodeId)
logger,err := log.New(logLevel,logPath,filepre,slog.LstdFlags|slog.Lshortfile,10)
filepre := fmt.Sprintf("%s_", localnodeinfo.NodeId)
logger, err := log.NewTextLogger(log.LogLevel,log.LogPath,filepre,true,log.LogChannelCap)
if err != nil {
fmt.Printf("cannot create log file!\n")
return err
}
log.Export(logger)
log.SetLogger(logger)
return nil
}
func Start() {
err := console.Run(os.Args)
if err!=nil {
fmt.Printf("%+v\n",err)
if err != nil {
fmt.Printf("%+v\n", err)
return
}
}
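// retireNode parses the "nodeid=<id>" argument, looks up the pid recorded by writeProcessPid and sends the retire signal to that process.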
func retireNode(args interface{}) error {
//1.Parse the arguments
param := args.(string)
if param == "" {
return nil
}
sParam := strings.Split(param, "=")
if len(sParam) != 2 {
return fmt.Errorf("invalid option %s", param)
}
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nId := strings.TrimSpace(sParam[1])
if nId == "" {
return fmt.Errorf("invalid option %s", param)
}
processId, err := getRunProcessPid(nId)
if err != nil {
return err
}
RetireProcess(processId)
return nil
}
func stopNode(args interface{}) error {
//1.Parse the arguments
param := args.(string)
@@ -196,19 +277,19 @@ func stopNode(args interface{}) error {
return nil
}
sParam := strings.Split(param,"=")
sParam := strings.Split(param, "=")
if len(sParam) != 2 {
return fmt.Errorf("invalid option %s",param)
return fmt.Errorf("invalid option %s", param)
}
if sParam[0]!="nodeid" {
return fmt.Errorf("invalid option %s",param)
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nodeId,err:= strconv.Atoi(sParam[1])
if err != nil {
return fmt.Errorf("invalid option %s",param)
nId := strings.TrimSpace(sParam[1])
if nId == "" {
return fmt.Errorf("invalid option %s", param)
}
processId,err := getRunProcessPid(nodeId)
processId, err := getRunProcessPid(nId)
if err != nil {
return err
}
@@ -217,68 +298,104 @@ func stopNode(args interface{}) error {
return nil
}
func startNode(args interface{}) error{
func startNode(args interface{}) error {
//1.Parse the arguments
param := args.(string)
if param == "" {
return nil
}
sParam := strings.Split(param,"=")
sParam := strings.Split(param, "=")
if len(sParam) != 2 {
return fmt.Errorf("invalid option %s",param)
return fmt.Errorf("invalid option %s", param)
}
if sParam[0]!="nodeid" {
return fmt.Errorf("invalid option %s",param)
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nodeId,err:= strconv.Atoi(sParam[1])
if err != nil {
return fmt.Errorf("invalid option %s",param)
strNodeId := strings.TrimSpace(sParam[1])
if strNodeId == "" {
return fmt.Errorf("invalid option %s", param)
}
timer.StartTimer(10*time.Millisecond,1000000)
log.SRelease("Start running server.")
//2.Initialize the node
initNode(nodeId)
for{
processId, pErr := getRunProcessPid(strNodeId)
if pErr != nil {
break
}
//3.Run the services
name, cErr := sysprocess.GetProcessNameByPID(int32(processId))
myName, mErr := sysprocess.GetMyProcessName()
//failed to get the current process name, which should not happen
if mErr != nil {
log.SInfo("get my process's name is error,", mErr.Error())
os.Exit(-1)
}
//an existing pid with the same process name means this node is already running
if cErr == nil && name == myName {
log.SInfo(fmt.Sprintf("repeat runs are not allowed,node is %s,processid is %d",strNodeId,processId))
os.Exit(-1)
}
break
}
//2.Record the process id
log.Info("Start running server.")
writeProcessPid(strNodeId)
timer.StartTimer(10*time.Millisecond, 1000000)
//3.Initialize the node
initNode(strNodeId)
//4.Run the services
service.Start()
//4.Run the cluster
//5.Run the cluster
cluster.GetCluster().Start()
//5.Record the process id
writeProcessPid(nodeId)
//6.Listen for the exit signal & profiler reports
bRun := true
var pProfilerTicker *time.Ticker = &time.Ticker{}
if profilerInterval>0 {
if profilerInterval > 0 {
pProfilerTicker = time.NewTicker(profilerInterval)
}
for bRun {
select {
case <-sig:
log.SRelease("receipt stop signal.")
bRun = false
case <- pProfilerTicker.C:
case s := <-sig:
signal := s.(syscall.Signal)
if signal == SignalRetire {
log.Info("receipt retire signal.")
notifyAllServiceRetire()
}else {
bRun = false
log.Info("receipt stop signal.")
}
case <-pProfilerTicker.C:
profiler.Report()
}
}
cluster.GetCluster().Stop()
//7.Exit
close(closeSig)
service.WaitStop()
log.SRelease("Server is stop.")
//7.Exit
service.StopAllService()
cluster.GetCluster().Stop()
log.Info("Server is stop.")
log.Close()
return nil
}
func Setup(s ...service.IService) {
for _,sv := range s {
func Setup(s ...service.IService) {
for _, sv := range s {
sv.OnSetup(sv)
preSetupService = append(preSetupService,sv)
preSetupService = append(preSetupService, sv)
}
}
func SetupTemplate(fs ...func()service.IService){
for _, f := range fs {
preSetupTemplateService = append(preSetupTemplateService, f)
}
}
@@ -286,67 +403,107 @@ func GetService(serviceName string) service.IService {
return service.GetService(serviceName)
}
func SetConfigDir(configDir string){
configDir = configDir
cluster.SetConfigDir(configDir)
func SetConfigDir(cfgDir string) {
configDir = cfgDir
cluster.SetConfigDir(cfgDir)
}
func GetConfigDir() string {
return configDir
}
func SetSysLog(strLevel string, pathname string, flag int){
logs,_:= log.New(strLevel,pathname, "", flag,10)
log.Export(logs)
}
func OpenProfilerReport(interval time.Duration){
func OpenProfilerReport(interval time.Duration) {
profilerInterval = interval
}
func openConsole(args interface{}) error{
func openConsole(args interface{}) error {
if args == "" {
return nil
}
strOpen := strings.ToLower(strings.TrimSpace(args.(string)))
if strOpen == "false" {
log.OpenConsole = false
}else if strOpen == "true" {
} else if strOpen == "true" {
log.OpenConsole = true
}else{
} else {
return errors.New("Parameter console error!")
}
return nil
}
func setLevel(args interface{}) error{
if args==""{
func setLevel(args interface{}) error {
if args == "" {
return nil
}
logLevel = strings.TrimSpace(args.(string))
if logLevel!= "debug" && logLevel!="release"&& logLevel!="warning"&&logLevel!="error"&&logLevel!="fatal" {
return errors.New("unknown level: " + logLevel)
strlogLevel := strings.TrimSpace(args.(string))
switch strlogLevel {
case "trace":
log.LogLevel = log.LevelTrace
case "debug":
log.LogLevel = log.LevelDebug
case "info":
log.LogLevel = log.LevelInfo
case "warning":
log.LogLevel = log.LevelWarning
case "error":
log.LogLevel = log.LevelError
case "stack":
log.LogLevel = log.LevelStack
case "fatal":
log.LogLevel = log.LevelFatal
default:
return errors.New("unknown level: " + strlogLevel)
}
return nil
}
func setLogPath(args interface{}) error{
if args == ""{
func setLogPath(args interface{}) error {
if args == "" {
return nil
}
logPath = strings.TrimSpace(args.(string))
dir, err := os.Stat(logPath) //the directory does not exist
if err == nil && dir.IsDir()==false {
return errors.New("Not found dir "+logPath)
log.LogPath = strings.TrimSpace(args.(string))
dir, err := os.Stat(log.LogPath) //the directory does not exist
if err == nil && dir.IsDir() == false {
return errors.New("Not found dir " + log.LogPath)
}
if err != nil {
err = os.Mkdir(logPath, os.ModePerm)
err = os.Mkdir(log.LogPath, os.ModePerm)
if err != nil {
return errors.New("Cannot create dir "+logPath)
return errors.New("Cannot create dir " + log.LogPath)
}
}
return nil
}
}
func setLogSize(args interface{}) error {
if args == "" {
return nil
}
logSize,ok := args.(int)
if ok == false{
return errors.New("param logsize is error")
}
log.LogSize = int64(logSize)*1024*1024
return nil
}
func setLogChannelCapNum(args interface{}) error {
if args == "" {
return nil
}
logChannelCap,ok := args.(int)
if ok == false{
return errors.New("param logsize is error")
}
log.LogChannelCap = logChannelCap
return nil
}


@@ -8,10 +8,23 @@ import (
)
func KillProcess(processId int){
err := syscall.Kill(processId,syscall.Signal(10))
err := syscall.Kill(processId,SingleStop)
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
}else{
fmt.Printf("kill processid %d is successful.\n",processId)
}
}
func GetBuildOSType() BuildOSType{
return Linux
}
func RetireProcess(processId int){
err := syscall.Kill(processId,SignalRetire)
if err != nil {
fmt.Printf("retire processid %d is fail:%+v.\n",processId,err)
}else{
fmt.Printf("retire processid %d is successful.\n",processId)
}
}


@@ -8,10 +8,23 @@ import (
)
func KillProcess(processId int){
err := syscall.Kill(processId,syscall.Signal(10))
err := syscall.Kill(processId,SingleStop)
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
}else{
fmt.Printf("kill processid %d is successful.\n",processId)
}
}
func GetBuildOSType() BuildOSType{
return Mac
}
func RetireProcess(processId int){
err := syscall.Kill(processId,SignalRetire)
if err != nil {
fmt.Printf("retire processid %d is fail:%+v.\n",processId,err)
}else{
fmt.Printf("retire processid %d is successful.\n",processId)
}
}


@@ -2,6 +2,28 @@
package node
func KillProcess(processId int){
import (
"os"
"fmt"
)
}
func KillProcess(processId int){
procss,err := os.FindProcess(processId)
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
return
}
err = procss.Kill()
if err != nil {
fmt.Printf("kill processid %d is fail:%+v.\n",processId,err)
}
}
func GetBuildOSType() BuildOSType{
return Windows
}
func RetireProcess(processId int){
fmt.Printf("This command does not support Windows")
}


@@ -3,7 +3,7 @@ package profiler
import (
"container/list"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"sync"
"time"
)
@@ -167,7 +167,7 @@ func DefaultReportFunction(name string,callNum int,costTime time.Duration,record
elem = elem.Next()
}
log.SRelease(strReport)
log.SInfo("report",strReport)
}
func Report() {
@@ -193,9 +193,11 @@ func Report() {
record = prof.record
prof.record = list.New()
callNum := prof.callNum
totalCostTime := prof.totalCostTime
prof.stackLocker.RUnlock()
DefaultReportFunction(name,prof.callNum,prof.totalCostTime,record)
DefaultReportFunction(name,callNum,totalCostTime,record)
}
}

rpc/callset.go (new file, 146 lines)

@@ -0,0 +1,146 @@
package rpc
import (
"errors"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/duanhf2012/origin/v2/log"
)
type CallSet struct {
pendingLock sync.RWMutex
startSeq uint64
pending map[uint64]*Call
callRpcTimeout time.Duration
maxCheckCallRpcCount int
callTimerHeap CallTimerHeap
}
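// Init creates the pending-call map and the call timer heap, then starts the background goroutine that checks for call timeouts.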
func (cs *CallSet) Init(){
cs.pendingLock.Lock()
cs.callTimerHeap.Init()
cs.pending = make(map[uint64]*Call,4096)
cs.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
cs.callRpcTimeout = DefaultRpcTimeout
go cs.checkRpcCallTimeout()
cs.pendingLock.Unlock()
}
func (bc *CallSet) makeCallFail(call *Call) {
if call.callback != nil && call.callback.IsValid() {
call.rpcHandler.PushRpcResponse(call)
} else {
call.done <- call
}
}
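// checkRpcCallTimeout wakes up every DefaultCheckRpcCallTimeoutInterval, pops expired calls from the timer heap and fails them with a timeout error.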
func (bc *CallSet) checkRpcCallTimeout() {
for{
time.Sleep(DefaultCheckRpcCallTimeoutInterval)
for i := 0; i < bc.maxCheckCallRpcCount; i++ {
bc.pendingLock.Lock()
callSeq := bc.callTimerHeap.PopTimeout()
if callSeq == 0 {
bc.pendingLock.Unlock()
break
}
pCall := bc.pending[callSeq]
if pCall == nil {
bc.pendingLock.Unlock()
log.Error("call seq is not find",log.Uint64("seq", callSeq))
continue
}
delete(bc.pending,callSeq)
strTimeout := strconv.FormatInt(int64(pCall.TimeOut.Seconds()), 10)
pCall.Err = errors.New("RPC call takes more than " + strTimeout + " seconds,method is "+pCall.ServiceMethod)
log.Error("call timeout",log.String("error",pCall.Err.Error()))
bc.makeCallFail(pCall)
bc.pendingLock.Unlock()
continue
}
}
}
func (bc *CallSet) AddPending(call *Call) {
bc.pendingLock.Lock()
if call.Seq == 0 {
bc.pendingLock.Unlock()
log.Stack("call is error.")
return
}
bc.pending[call.Seq] = call
bc.callTimerHeap.AddTimer(call.Seq,call.TimeOut)
bc.pendingLock.Unlock()
}
func (bc *CallSet) RemovePending(seq uint64) *Call {
if seq == 0 {
return nil
}
bc.pendingLock.Lock()
call := bc.removePending(seq)
bc.pendingLock.Unlock()
return call
}
func (bc *CallSet) removePending(seq uint64) *Call {
v, ok := bc.pending[seq]
if ok == false {
return nil
}
bc.callTimerHeap.Cancel(seq)
delete(bc.pending, seq)
return v
}
func (bc *CallSet) FindPending(seq uint64) (pCall *Call) {
if seq == 0 {
return nil
}
bc.pendingLock.Lock()
pCall = bc.pending[seq]
bc.pendingLock.Unlock()
return pCall
}
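// cleanPending fails every outstanding call; it is used when the remote node disconnects.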
func (bc *CallSet) cleanPending(){
bc.pendingLock.Lock()
for {
callSeq := bc.callTimerHeap.PopFirst()
if callSeq == 0 {
break
}
pCall := bc.pending[callSeq]
if pCall == nil {
log.Error("call Seq is not find",log.Uint64("seq",callSeq))
continue
}
delete(bc.pending,callSeq)
pCall.Err = errors.New("nodeid is disconnect ")
bc.makeCallFail(pCall)
}
bc.pendingLock.Unlock()
}
func (bc *CallSet) generateSeq() uint64 {
return atomic.AddUint64(&bc.startSeq, 1)
}


@@ -1,200 +1,252 @@
package rpc
import (
"container/list"
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/util/timer"
"math"
"github.com/duanhf2012/origin/v2/network"
"reflect"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/duanhf2012/origin/v2/log"
"fmt"
)
type Client struct {
clientSeq uint32
id int
bSelfNode bool
network.TCPClient
conn *network.TCPConn
const(
DefaultRpcConnNum = 1
DefaultRpcLenMsgLen = 4
DefaultRpcMinMsgLen = 2
DefaultMaxCheckCallRpcCount = 1000
DefaultMaxPendingWriteNum = 1000000
pendingLock sync.RWMutex
startSeq uint64
pending map[uint64]*list.Element
pendingTimer *list.List
callRpcTimeout time.Duration
maxCheckCallRpcCount int
TriggerRpcEvent
}
DefaultConnectInterval = 2*time.Second
DefaultCheckRpcCallTimeoutInterval = 1*time.Second
DefaultRpcTimeout = 15*time.Second
)
var clientSeq uint32
type IWriter interface {
WriteMsg (nodeId string,args ...[]byte) error
IsConnected() bool
}
type IRealClient interface {
SetConn(conn *network.TCPConn)
Close(waitDone bool)
AsyncCall(NodeId string,timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error)
Go(NodeId string,timeout time.Duration,rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call
RawGo(NodeId string,timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call
IsConnected() bool
Run()
OnClose()
Bind(server IServer)
}
type Client struct {
clientId uint32
targetNodeId string
compressBytesLen int
*CallSet
IRealClient
}
func (client *Client) NewClientAgent(conn *network.TCPConn) network.Agent {
client.conn = conn
client.ResetPending()
client.SetConn(conn)
return client
}
func (client *Client) Connect(id int, addr string, maxRpcParamLen uint32) error {
client.clientSeq = atomic.AddUint32(&clientSeq, 1)
client.id = id
client.Addr = addr
client.maxCheckCallRpcCount = 1000
client.callRpcTimeout = 15 * time.Second
client.ConnNum = 1
client.ConnectInterval = time.Second * 2
client.PendingWriteNum = 200000
client.AutoReconnect = true
client.LenMsgLen = 4
client.MinMsgLen = 2
if maxRpcParamLen > 0 {
client.MaxMsgLen = maxRpcParamLen
} else {
client.MaxMsgLen = math.MaxUint32
func (client *Client) GetTargetNodeId() string {
return client.targetNodeId
}
func (client *Client) GetClientId() uint32 {
return client.clientId
}
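// processRpcResponse decodes one response frame: the high bit of the first byte marks lz4 compression, the low 7 bits select the processor.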
func (client *Client) processRpcResponse(responseData []byte) error{
bCompress := (responseData[0]>>7) > 0
processor := GetProcessor(responseData[0]&0x7f)
if processor == nil {
//rc.conn.ReleaseReadMsg(responseData)
err:= errors.New(fmt.Sprintf("cannot find process %d",responseData[0]&0x7f))
log.Error(err.Error())
return err
}
client.NewAgent = client.NewClientAgent
client.LittleEndian = LittleEndian
client.ResetPending()
go client.startCheckRpcCallTimer()
if addr == "" {
client.bSelfNode = true
//1.Parse the head
response := RpcResponse{}
response.RpcResponseData = processor.MakeRpcResponse(0, "", nil)
//uncompress
byteData := responseData[1:]
var compressBuff []byte
if bCompress == true {
var unCompressErr error
compressBuff,unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr!= nil {
//rc.conn.ReleaseReadMsg(responseData)
err := fmt.Errorf("uncompressBlock failed,err :%s",unCompressErr.Error())
return err
}
byteData = compressBuff
}
err := processor.Unmarshal(byteData, response.RpcResponseData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
//rc.conn.ReleaseReadMsg(bytes)
if err != nil {
processor.ReleaseRpcResponse(response.RpcResponseData)
log.Error("rpcClient Unmarshal head error",log.ErrorAttr("error",err))
return nil
}
client.Start()
v := client.RemovePending(response.RpcResponseData.GetSeq())
if v == nil {
log.Error("rpcClient cannot find seq",log.Uint64("seq",response.RpcResponseData.GetSeq()))
} else {
v.Err = nil
if len(response.RpcResponseData.GetReply()) > 0 {
err = processor.Unmarshal(response.RpcResponseData.GetReply(), v.Reply)
if err != nil {
log.Error("rpcClient Unmarshal body failed",log.ErrorAttr("error",err))
v.Err = err
}
}
if response.RpcResponseData.GetErr() != nil {
v.Err = response.RpcResponseData.GetErr()
}
if v.callback != nil && v.callback.IsValid() {
v.rpcHandler.PushRpcResponse(v)
} else {
v.done <- v
}
}
processor.ReleaseRpcResponse(response.RpcResponseData)
return nil
}
func (client *Client) startCheckRpcCallTimer() {
t := timer.NewTimer(5 * time.Second)
for {
select {
case cTimer := <-t.C:
cTimer.SetupTimer(time.Now())
client.checkRpcCallTimeout()
//func (rc *Client) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
// _, processor := GetProcessorType(args)
// InParam, err := processor.Marshal(args)
// if err != nil {
// log.Error("Marshal is fail",log.ErrorAttr("error",err))
// call := MakeCall()
// call.DoError(err)
// return call
// }
//
// return rc.RawGo(timeout,rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
//}
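// rawGo marshals the request, compresses it when it reaches compressBytesLen, registers the call as pending (unless noReply) and writes it through the IWriter.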
func (rc *Client) rawGo(nodeId string,w IWriter,timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
call := MakeCall()
call.ServiceMethod = serviceMethod
call.Reply = reply
call.Seq = rc.generateSeq()
call.TimeOut = timeout
request := MakeRpcRequest(processor, call.Seq, rpcMethodId, serviceMethod, noReply, rawArgs)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
call.Seq = 0
log.Error("marshal is fail",log.String("error",err.Error()))
call.DoError(err)
return call
}
if w == nil || w.IsConnected()==false {
call.Seq = 0
sErr := errors.New(serviceMethod + " was called failed,rpc client is disconnect")
log.Error("conn is disconnect",log.String("error",sErr.Error()))
call.DoError(sErr)
return call
}
var compressBuff[]byte
bCompress := uint8(0)
if rc.compressBytesLen > 0 && len(bytes) >= rc.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
call.Seq = 0
log.Error("compress fail",log.String("error",cErr.Error()))
call.DoError(cErr)
return call
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
t.Cancel()
timer.ReleaseTimer(t)
}
func (client *Client) makeCallFail(call *Call) {
client.removePending(call.Seq)
if call.callback != nil && call.callback.IsValid() {
call.rpcHandler.PushRpcResponse(call)
} else {
call.done <- call
}
}
func (client *Client) checkRpcCallTimeout() {
now := time.Now()
for i := 0; i < client.maxCheckCallRpcCount; i++ {
client.pendingLock.Lock()
pElem := client.pendingTimer.Front()
if pElem == nil {
client.pendingLock.Unlock()
break
}
pCall := pElem.Value.(*Call)
if now.Sub(pCall.callTime) > client.callRpcTimeout {
strTimeout := strconv.FormatInt(int64(client.callRpcTimeout/time.Second), 10)
pCall.Err = errors.New("RPC call takes more than " + strTimeout + " seconds")
client.makeCallFail(pCall)
client.pendingLock.Unlock()
continue
}
client.pendingLock.Unlock()
}
}
func (client *Client) ResetPending() {
client.pendingLock.Lock()
if client.pending != nil {
for _, v := range client.pending {
v.Value.(*Call).Err = errors.New("node is disconnect")
v.Value.(*Call).done <- v.Value.(*Call)
}
if noReply == false {
rc.AddPending(call)
}
client.pending = make(map[uint64]*list.Element, 4096)
client.pendingTimer = list.New()
client.pendingLock.Unlock()
}
func (client *Client) AddPending(call *Call) {
client.pendingLock.Lock()
call.callTime = time.Now()
elemTimer := client.pendingTimer.PushBack(call)
client.pending[call.Seq] = elemTimer //if the send below fails, the entry stays here forever
client.pendingLock.Unlock()
}
func (client *Client) RemovePending(seq uint64) *Call {
if seq == 0 {
return nil
err = w.WriteMsg(nodeId,[]byte{uint8(processor.GetProcessorType())|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
client.pendingLock.Lock()
call := client.removePending(seq)
client.pendingLock.Unlock()
if err != nil {
rc.RemovePending(call.Seq)
log.Error("WiteMsg is fail",log.ErrorAttr("error",err))
call.Seq = 0
call.DoError(err)
}
return call
}
func (client *Client) removePending(seq uint64) *Call {
v, ok := client.pending[seq]
if ok == false {
return nil
}
call := v.Value.(*Call)
client.pendingTimer.Remove(v)
delete(client.pending, seq)
return call
}
func (client *Client) FindPending(seq uint64) *Call {
client.pendingLock.Lock()
v, ok := client.pending[seq]
if ok == false {
client.pendingLock.Unlock()
return nil
}
pCall := v.Value.(*Call)
client.pendingLock.Unlock()
return pCall
}
func (client *Client) generateSeq() uint64 {
return atomic.AddUint64(&client.startSeq, 1)
}
func (client *Client) AsyncCall(rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) error {
func (rc *Client) asyncCall(nodeId string,w IWriter,timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
processorType, processor := GetProcessorType(args)
InParam, herr := processor.Marshal(args)
if herr != nil {
return herr
return emptyCancelRpc,herr
}
seq := client.generateSeq()
seq := rc.generateSeq()
request := MakeRpcRequest(processor, seq, 0, serviceMethod, false, InParam)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
return err
return emptyCancelRpc,err
}
if client.conn == nil {
return errors.New("Rpc server is disconnect,call " + serviceMethod)
if w == nil || w.IsConnected()==false {
return emptyCancelRpc,errors.New("Rpc server is disconnect,call " + serviceMethod)
}
var compressBuff[]byte
bCompress := uint8(0)
if rc.compressBytesLen>0 &&len(bytes) >= rc.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
return emptyCancelRpc,cErr
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
call := MakeCall()
@@ -203,146 +255,23 @@ func (client *Client) AsyncCall(rpcHandler IRpcHandler, serviceMethod string, ca
call.rpcHandler = rpcHandler
call.ServiceMethod = serviceMethod
call.Seq = seq
client.AddPending(call)
call.TimeOut = timeout
rc.AddPending(call)
err = client.conn.WriteMsg([]byte{uint8(processorType)}, bytes)
err = w.WriteMsg(nodeId,[]byte{uint8(processorType)|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
client.RemovePending(call.Seq)
rc.RemovePending(call.Seq)
ReleaseCall(call)
return err
return emptyCancelRpc,err
}
return nil
}
func (client *Client) RawGo(processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, args []byte, reply interface{}) *Call {
call := MakeCall()
call.ServiceMethod = serviceMethod
call.Reply = reply
call.Seq = client.generateSeq()
request := MakeRpcRequest(processor, call.Seq, rpcMethodId, serviceMethod, noReply, args)
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
call.Seq = 0
call.Err = err
return call
if cancelable {
rpcCancel := RpcCancel{CallSeq:seq,Cli: rc}
return rpcCancel.CancelRpc,nil
}
if client.conn == nil {
call.Seq = 0
call.Err = errors.New(serviceMethod + " was called failed,rpc client is disconnect")
return call
}
if noReply == false {
client.AddPending(call)
}
err = client.conn.WriteMsg([]byte{uint8(processor.GetProcessorType())}, bytes)
if err != nil {
client.RemovePending(call.Seq)
call.Seq = 0
call.Err = err
}
return call
}
func (client *Client) Go(noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
_, processor := GetProcessorType(args)
InParam, err := processor.Marshal(args)
if err != nil {
call := MakeCall()
call.Err = err
return call
}
return client.RawGo(processor, noReply, 0, serviceMethod, InParam, reply)
}
func (client *Client) Run() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.SError("core dump info[", errString, "]\n", string(buf[:l]))
}
}()
client.TriggerRpcEvent(true, client.GetClientSeq(), client.GetId())
for {
bytes, err := client.conn.ReadMsg()
if err != nil {
log.SError("rpcClient ", client.Addr, " ReadMsg error:", err.Error())
return
}
processor := GetProcessor(bytes[0])
if processor == nil {
client.conn.ReleaseReadMsg(bytes)
log.SError("rpcClient ", client.Addr, " ReadMsg head error:", err.Error())
return
}
//1.解析head
response := RpcResponse{}
response.RpcResponseData = processor.MakeRpcResponse(0, "", nil)
err = processor.Unmarshal(bytes[1:], response.RpcResponseData)
client.conn.ReleaseReadMsg(bytes)
if err != nil {
processor.ReleaseRpcResponse(response.RpcResponseData)
log.SError("rpcClient Unmarshal head error:", err.Error())
continue
}
v := client.RemovePending(response.RpcResponseData.GetSeq())
if v == nil {
log.SError("rpcClient cannot find seq ", response.RpcResponseData.GetSeq(), " in pending")
} else {
v.Err = nil
if len(response.RpcResponseData.GetReply()) > 0 {
err = processor.Unmarshal(response.RpcResponseData.GetReply(), v.Reply)
if err != nil {
log.SError("rpcClient Unmarshal body error:", err.Error())
v.Err = err
}
}
if response.RpcResponseData.GetErr() != nil {
v.Err = response.RpcResponseData.GetErr()
}
if v.callback != nil && v.callback.IsValid() {
v.rpcHandler.PushRpcResponse(v)
} else {
v.done <- v
}
}
processor.ReleaseRpcResponse(response.RpcResponseData)
}
}
func (client *Client) OnClose() {
client.TriggerRpcEvent(false, client.GetClientSeq(), client.GetId())
}
func (client *Client) IsConnected() bool {
return client.bSelfNode || (client.conn != nil && client.conn.IsConnected() == true)
}
func (client *Client) GetId() int {
return client.id
}
func (client *Client) Close(waitDone bool) {
client.TCPClient.Close(waitDone)
}
func (client *Client) GetClientSeq() uint32 {
return client.clientSeq
}
return emptyCancelRpc,nil
}

rpc/compressor.go (new file, 102 lines)

@@ -0,0 +1,102 @@
package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/util/bytespool"
"github.com/pierrec/lz4/v4"
"runtime"
)
var memPool bytespool.IBytesMempool = bytespool.NewMemAreaPool()
type ICompressor interface {
CompressBlock(src []byte) ([]byte, error) //if dst is pre-allocated its memory is used; passing nil allocates internally
UncompressBlock(src []byte) ([]byte, error) //if dst is pre-allocated its memory is used; passing nil allocates internally
CompressBufferCollection(buffer []byte) //reclaims the compressed buffer memory
UnCompressBufferCollection(buffer []byte) //reclaims the uncompressed buffer memory
}
var compressor ICompressor
func init(){
SetCompressor(&Lz4Compressor{})
}
func SetCompressor(cp ICompressor){
compressor = cp
}
type Lz4Compressor struct {
}
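// CompressBlock lz4-compresses src into a pooled buffer and stores the rounded-up compression ratio in the first output byte, which UncompressBlock later uses to size its buffer.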
func (lc *Lz4Compressor) CompressBlock(src []byte) (dest []byte, err error) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
err = errors.New("core dump info[" + errString + "]\n" + string(buf[:l]))
}
}()
var c lz4.Compressor
var cnt int
dest = memPool.MakeBytes(lz4.CompressBlockBound(len(src))+1)
cnt, err = c.CompressBlock(src, dest[1:])
if err != nil {
memPool.ReleaseBytes(dest)
return nil,err
}
ratio := len(src) / cnt
if len(src) % cnt > 0 {
ratio += 1
}
if ratio > 255 {
memPool.ReleaseBytes(dest)
return nil,fmt.Errorf("Impermissible errors")
}
dest[0] = uint8(ratio)
dest = dest[:cnt+1]
return
}
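// UncompressBlock reads the ratio byte written by CompressBlock, allocates a pooled buffer of len(src)*ratio and decompresses the payload into it.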
func (lc *Lz4Compressor) UncompressBlock(src []byte) (dest []byte, err error) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
err = errors.New("core dump info[" + errString + "]\n" + string(buf[:l]))
}
}()
ratio := uint8(src[0])
if ratio == 0 {
return nil,fmt.Errorf("Impermissible errors")
}
dest = memPool.MakeBytes(len(src)*int(ratio))
cnt, err := lz4.UncompressBlock(src[1:], dest)
if err != nil {
memPool.ReleaseBytes(dest)
return nil,err
}
return dest[:cnt],nil
}
func (lc *Lz4Compressor) compressBlockBound(n int) int{
return lz4.CompressBlockBound(n)
}
func (lc *Lz4Compressor) CompressBufferCollection(buffer []byte){
memPool.ReleaseBytes(buffer)
}
func (lc *Lz4Compressor) UnCompressBufferCollection(buffer []byte) {
memPool.ReleaseBytes(buffer)
}

File diff suppressed because it is too large.


@@ -1,29 +0,0 @@
syntax = "proto3";
package rpc;
option go_package = "./rpc";
message NodeInfo{
int32 NodeId = 1;
string NodeName = 2;
string ListenAddr = 3;
uint32 MaxRpcParamLen = 4;
bool Private = 5;
repeated string PublicServiceList = 6;
}
//Client->Master
message ServiceDiscoverReq{
NodeInfo nodeInfo = 1;
}
//Master->Client
message SubscribeDiscoverNotify{
int32 MasterNodeId = 1;
bool IsFull = 2;
int32 DelNodeId = 3;
repeated NodeInfo nodeInfo = 4;
}
//Master->Client
message Empty{
}


@@ -1,95 +0,0 @@
package rpc
import (
"github.com/duanhf2012/origin/util/sync"
"github.com/gogo/protobuf/proto"
)
type GoGoPBProcessor struct {
}
var rpcGoGoPbResponseDataPool =sync.NewPool(make(chan interface{},10240), func()interface{}{
return &GoGoPBRpcResponseData{}
})
var rpcGoGoPbRequestDataPool =sync.NewPool(make(chan interface{},10240), func()interface{}{
return &GoGoPBRpcRequestData{}
})
func (slf *GoGoPBRpcRequestData) MakeRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) *GoGoPBRpcRequestData{
slf.Seq = seq
slf.RpcMethodId = rpcMethodId
slf.ServiceMethod = serviceMethod
slf.NoReply = noReply
slf.InParam = inParam
return slf
}
func (slf *GoGoPBRpcResponseData) MakeRespone(seq uint64,err RpcError,reply []byte) *GoGoPBRpcResponseData{
slf.Seq = seq
slf.Error = err.Error()
slf.Reply = reply
return slf
}
func (slf *GoGoPBProcessor) Marshal(v interface{}) ([]byte, error){
return proto.Marshal(v.(proto.Message))
}
func (slf *GoGoPBProcessor) Unmarshal(data []byte, msg interface{}) error{
protoMsg := msg.(proto.Message)
return proto.Unmarshal(data, protoMsg)
}
func (slf *GoGoPBProcessor) MakeRpcRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) IRpcRequestData{
pGogoPbRpcRequestData := rpcGoGoPbRequestDataPool.Get().(*GoGoPBRpcRequestData)
pGogoPbRpcRequestData.MakeRequest(seq,rpcMethodId,serviceMethod,noReply,inParam)
return pGogoPbRpcRequestData
}
func (slf *GoGoPBProcessor) MakeRpcResponse(seq uint64,err RpcError,reply []byte) IRpcResponseData {
pGoGoPBRpcResponseData := rpcGoGoPbResponseDataPool.Get().(*GoGoPBRpcResponseData)
pGoGoPBRpcResponseData.MakeRespone(seq,err,reply)
return pGoGoPBRpcResponseData
}
func (slf *GoGoPBProcessor) ReleaseRpcRequest(rpcRequestData IRpcRequestData){
rpcGoGoPbRequestDataPool.Put(rpcRequestData)
}
func (slf *GoGoPBProcessor) ReleaseRpcResponse(rpcResponseData IRpcResponseData){
rpcGoGoPbResponseDataPool.Put(rpcResponseData)
}
func (slf *GoGoPBProcessor) IsParse(param interface{}) bool {
_,ok := param.(proto.Message)
return ok
}
func (slf *GoGoPBProcessor) GetProcessorType() RpcProcessorType{
return RpcProcessorGoGoPB
}
func (slf *GoGoPBRpcRequestData) IsNoReply() bool{
return slf.GetNoReply()
}
func (slf *GoGoPBRpcResponseData) GetErr() *RpcError {
if slf.GetError() == "" {
return nil
}
err := RpcError(slf.GetError())
return &err
}


@@ -1,769 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gogorpc.proto
package rpc
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type GoGoPBRpcRequestData struct {
Seq uint64 `protobuf:"varint,1,opt,name=Seq,proto3" json:"Seq,omitempty"`
RpcMethodId uint32 `protobuf:"varint,2,opt,name=RpcMethodId,proto3" json:"RpcMethodId,omitempty"`
ServiceMethod string `protobuf:"bytes,3,opt,name=ServiceMethod,proto3" json:"ServiceMethod,omitempty"`
NoReply bool `protobuf:"varint,4,opt,name=NoReply,proto3" json:"NoReply,omitempty"`
InParam []byte `protobuf:"bytes,5,opt,name=InParam,proto3" json:"InParam,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GoGoPBRpcRequestData) Reset() { *m = GoGoPBRpcRequestData{} }
func (m *GoGoPBRpcRequestData) String() string { return proto.CompactTextString(m) }
func (*GoGoPBRpcRequestData) ProtoMessage() {}
func (*GoGoPBRpcRequestData) Descriptor() ([]byte, []int) {
return fileDescriptor_d0e25d3af112ec8f, []int{0}
}
func (m *GoGoPBRpcRequestData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GoGoPBRpcRequestData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GoGoPBRpcRequestData.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GoGoPBRpcRequestData) XXX_Merge(src proto.Message) {
xxx_messageInfo_GoGoPBRpcRequestData.Merge(m, src)
}
func (m *GoGoPBRpcRequestData) XXX_Size() int {
return m.Size()
}
func (m *GoGoPBRpcRequestData) XXX_DiscardUnknown() {
xxx_messageInfo_GoGoPBRpcRequestData.DiscardUnknown(m)
}
var xxx_messageInfo_GoGoPBRpcRequestData proto.InternalMessageInfo
func (m *GoGoPBRpcRequestData) GetSeq() uint64 {
if m != nil {
return m.Seq
}
return 0
}
func (m *GoGoPBRpcRequestData) GetRpcMethodId() uint32 {
if m != nil {
return m.RpcMethodId
}
return 0
}
func (m *GoGoPBRpcRequestData) GetServiceMethod() string {
if m != nil {
return m.ServiceMethod
}
return ""
}
func (m *GoGoPBRpcRequestData) GetNoReply() bool {
if m != nil {
return m.NoReply
}
return false
}
func (m *GoGoPBRpcRequestData) GetInParam() []byte {
if m != nil {
return m.InParam
}
return nil
}
type GoGoPBRpcResponseData struct {
Seq uint64 `protobuf:"varint,1,opt,name=Seq,proto3" json:"Seq,omitempty"`
Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"`
Reply []byte `protobuf:"bytes,3,opt,name=Reply,proto3" json:"Reply,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GoGoPBRpcResponseData) Reset() { *m = GoGoPBRpcResponseData{} }
func (m *GoGoPBRpcResponseData) String() string { return proto.CompactTextString(m) }
func (*GoGoPBRpcResponseData) ProtoMessage() {}
func (*GoGoPBRpcResponseData) Descriptor() ([]byte, []int) {
return fileDescriptor_d0e25d3af112ec8f, []int{1}
}
func (m *GoGoPBRpcResponseData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GoGoPBRpcResponseData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GoGoPBRpcResponseData.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GoGoPBRpcResponseData) XXX_Merge(src proto.Message) {
xxx_messageInfo_GoGoPBRpcResponseData.Merge(m, src)
}
func (m *GoGoPBRpcResponseData) XXX_Size() int {
return m.Size()
}
func (m *GoGoPBRpcResponseData) XXX_DiscardUnknown() {
xxx_messageInfo_GoGoPBRpcResponseData.DiscardUnknown(m)
}
var xxx_messageInfo_GoGoPBRpcResponseData proto.InternalMessageInfo
func (m *GoGoPBRpcResponseData) GetSeq() uint64 {
if m != nil {
return m.Seq
}
return 0
}
func (m *GoGoPBRpcResponseData) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *GoGoPBRpcResponseData) GetReply() []byte {
if m != nil {
return m.Reply
}
return nil
}
func init() {
proto.RegisterType((*GoGoPBRpcRequestData)(nil), "rpc.GoGoPBRpcRequestData")
proto.RegisterType((*GoGoPBRpcResponseData)(nil), "rpc.GoGoPBRpcResponseData")
}
func init() { proto.RegisterFile("gogorpc.proto", fileDescriptor_d0e25d3af112ec8f) }
var fileDescriptor_d0e25d3af112ec8f = []byte{
// 233 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0xcf, 0x4f, 0xcf,
0x2f, 0x2a, 0x48, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2e, 0x2a, 0x48, 0x56, 0x5a,
0xc2, 0xc8, 0x25, 0xe2, 0x9e, 0xef, 0x9e, 0x1f, 0xe0, 0x14, 0x54, 0x90, 0x1c, 0x94, 0x5a, 0x58,
0x9a, 0x5a, 0x5c, 0xe2, 0x92, 0x58, 0x92, 0x28, 0x24, 0xc0, 0xc5, 0x1c, 0x9c, 0x5a, 0x28, 0xc1,
0xa8, 0xc0, 0xa8, 0xc1, 0x12, 0x04, 0x62, 0x0a, 0x29, 0x70, 0x71, 0x07, 0x15, 0x24, 0xfb, 0xa6,
0x96, 0x64, 0xe4, 0xa7, 0x78, 0xa6, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xf0, 0x06, 0x21, 0x0b, 0x09,
0xa9, 0x70, 0xf1, 0x06, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x42, 0x84, 0x24, 0x98, 0x15, 0x18,
0x35, 0x38, 0x83, 0x50, 0x05, 0x85, 0x24, 0xb8, 0xd8, 0xfd, 0xf2, 0x83, 0x52, 0x0b, 0x72, 0x2a,
0x25, 0x58, 0x14, 0x18, 0x35, 0x38, 0x82, 0x60, 0x5c, 0x90, 0x8c, 0x67, 0x5e, 0x40, 0x62, 0x51,
0x62, 0xae, 0x04, 0xab, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x8c, 0xab, 0x14, 0xca, 0x25, 0x8a, 0xe4,
0xca, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x1c, 0xce, 0x14, 0xe1, 0x62, 0x75, 0x2d, 0x2a, 0xca,
0x2f, 0x02, 0x3b, 0x90, 0x33, 0x08, 0xc2, 0x01, 0x89, 0x42, 0xac, 0x64, 0x06, 0x1b, 0x0c, 0xe1,
0x38, 0x09, 0x9f, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x51,
0xac, 0x7a, 0xfa, 0x45, 0x05, 0xc9, 0x49, 0x6c, 0xe0, 0xe0, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff,
0xff, 0x26, 0xcf, 0x31, 0x39, 0x2f, 0x01, 0x00, 0x00,
}
func (m *GoGoPBRpcRequestData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GoGoPBRpcRequestData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GoGoPBRpcRequestData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.InParam) > 0 {
i -= len(m.InParam)
copy(dAtA[i:], m.InParam)
i = encodeVarintGogorpc(dAtA, i, uint64(len(m.InParam)))
i--
dAtA[i] = 0x2a
}
if m.NoReply {
i--
if m.NoReply {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x20
}
if len(m.ServiceMethod) > 0 {
i -= len(m.ServiceMethod)
copy(dAtA[i:], m.ServiceMethod)
i = encodeVarintGogorpc(dAtA, i, uint64(len(m.ServiceMethod)))
i--
dAtA[i] = 0x1a
}
if m.RpcMethodId != 0 {
i = encodeVarintGogorpc(dAtA, i, uint64(m.RpcMethodId))
i--
dAtA[i] = 0x10
}
if m.Seq != 0 {
i = encodeVarintGogorpc(dAtA, i, uint64(m.Seq))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *GoGoPBRpcResponseData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GoGoPBRpcResponseData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *GoGoPBRpcResponseData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Reply) > 0 {
i -= len(m.Reply)
copy(dAtA[i:], m.Reply)
i = encodeVarintGogorpc(dAtA, i, uint64(len(m.Reply)))
i--
dAtA[i] = 0x1a
}
if len(m.Error) > 0 {
i -= len(m.Error)
copy(dAtA[i:], m.Error)
i = encodeVarintGogorpc(dAtA, i, uint64(len(m.Error)))
i--
dAtA[i] = 0x12
}
if m.Seq != 0 {
i = encodeVarintGogorpc(dAtA, i, uint64(m.Seq))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintGogorpc(dAtA []byte, offset int, v uint64) int {
offset -= sovGogorpc(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *GoGoPBRpcRequestData) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Seq != 0 {
n += 1 + sovGogorpc(uint64(m.Seq))
}
if m.RpcMethodId != 0 {
n += 1 + sovGogorpc(uint64(m.RpcMethodId))
}
l = len(m.ServiceMethod)
if l > 0 {
n += 1 + l + sovGogorpc(uint64(l))
}
if m.NoReply {
n += 2
}
l = len(m.InParam)
if l > 0 {
n += 1 + l + sovGogorpc(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GoGoPBRpcResponseData) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Seq != 0 {
n += 1 + sovGogorpc(uint64(m.Seq))
}
l = len(m.Error)
if l > 0 {
n += 1 + l + sovGogorpc(uint64(l))
}
l = len(m.Reply)
if l > 0 {
n += 1 + l + sovGogorpc(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovGogorpc(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGogorpc(x uint64) (n int) {
return sovGogorpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *GoGoPBRpcRequestData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GoGoPBRpcRequestData: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GoGoPBRpcRequestData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Seq", wireType)
}
m.Seq = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Seq |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RpcMethodId", wireType)
}
m.RpcMethodId = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RpcMethodId |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceMethod", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGogorpc
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGogorpc
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ServiceMethod = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NoReply", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.NoReply = bool(v != 0)
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field InParam", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthGogorpc
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthGogorpc
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.InParam = append(m.InParam[:0], dAtA[iNdEx:postIndex]...)
if m.InParam == nil {
m.InParam = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGogorpc(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGogorpc
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGogorpc
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *GoGoPBRpcResponseData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GoGoPBRpcResponseData: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GoGoPBRpcResponseData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Seq", wireType)
}
m.Seq = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Seq |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGogorpc
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGogorpc
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Error = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Reply", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGogorpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthGogorpc
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthGogorpc
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Reply = append(m.Reply[:0], dAtA[iNdEx:postIndex]...)
if m.Reply == nil {
m.Reply = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGogorpc(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGogorpc
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGogorpc
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGogorpc(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGogorpc
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGogorpc
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGogorpc
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGogorpc
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGogorpc
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGogorpc
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGogorpc = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGogorpc = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGogorpc = fmt.Errorf("proto: unexpected end of group")
)


@@ -1,8 +1,9 @@
package rpc
import (
"github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/v2/util/sync"
jsoniter "github.com/json-iterator/go"
"reflect"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
@@ -119,6 +120,22 @@ func (jsonRpcResponseData *JsonRpcResponseData) GetReply() []byte{
}
func (jsonProcessor *JsonProcessor) Clone(src interface{}) (interface{},error){
dstValue := reflect.New(reflect.ValueOf(src).Type().Elem())
bytes,err := json.Marshal(src)
if err != nil {
return nil,err
}
dst := dstValue.Interface()
err = json.Unmarshal(bytes,dst)
if err != nil {
return nil,err
}
return dst,nil
}

rpc/lclient.go (new file, 137 lines)

@@ -0,0 +1,137 @@
package rpc
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"reflect"
"strings"
"sync/atomic"
"time"
)
//Client for the local node
type LClient struct {
selfClient *Client
}
func (rc *LClient) Lock(){
}
func (rc *LClient) Unlock(){
}
func (lc *LClient) Run(){
}
func (lc *LClient) OnClose(){
}
func (lc *LClient) IsConnected() bool {
return true
}
func (lc *LClient) SetConn(conn *network.TCPConn){
}
func (lc *LClient) Close(waitDone bool){
}
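// Go performs an in-node call: a call to the caller's own service goes through myselfRpcHandlerGo, any other service goes through selfNodeRpcHandlerGo.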
func (lc *LClient) Go(nodeId string,timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//check whether the call targets the same service
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
sErr := errors.New("Call serviceMethod " + serviceMethod + " is error!")
log.Error("call rpc fail",log.String("error",sErr.Error()))
call := MakeCall()
call.DoError(sErr)
return call
}
serviceName := serviceMethod[:findIndex]
if serviceName == rpcHandler.GetName() { //the service calls itself
//call this service's own rpcHandler
err := pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args, requestHandlerNull,reply)
call := MakeCall()
if err != nil {
call.DoError(err)
return call
}
call.DoOK()
return call
}
//dispatch to another rpcHandler on this node
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout,nil, lc.selfClient, noReply, serviceName, 0, serviceMethod, args, reply, nil)
}
func (rc *LClient) RawGo(nodeId string,timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceName string, rawArgs []byte, reply interface{}) *Call {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//the service calls itself
if serviceName == rpcHandler.GetName() {
call := MakeCall()
call.ServiceMethod = serviceName
call.Reply = reply
call.TimeOut = timeout
err := pLocalRpcServer.myselfRpcHandlerGo(rc.selfClient,serviceName, serviceName, rawArgs, requestHandlerNull,nil)
call.Err = err
call.done <- call
return call
}
//dispatch to another rpcHandler on this node
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout,processor,rc.selfClient, true, serviceName, rpcMethodId, serviceName, nil, nil, rawArgs)
}
func (lc *LClient) AsyncCall(nodeId string,timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, reply interface{},cancelable bool) (CancelRpc,error) {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//check whether the call targets the same service
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
err := errors.New("Call serviceMethod " + serviceMethod + " is error!")
callback.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.Error("serviceMethod format is error",log.String("error",err.Error()))
return emptyCancelRpc,nil
}
serviceName := serviceMethod[:findIndex]
//call this service's own rpcHandler
if serviceName == rpcHandler.GetName() { //the service calls itself
return emptyCancelRpc,pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args,callback ,reply)
}
//dispatch to another rpcHandler on this node
cancelRpc,err := pLocalRpcServer.selfNodeRpcHandlerAsyncGo(timeout,lc.selfClient, rpcHandler, false, serviceName, serviceMethod, args, reply, callback,cancelable)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
return cancelRpc,nil
}
func NewLClient(localNodeId string,callSet *CallSet) *Client{
client := &Client{}
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.targetNodeId = localNodeId
//client.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
//client.callRpcTimeout = DefaultRpcTimeout
lClient := &LClient{}
lClient.selfClient = client
client.IRealClient = lClient
client.CallSet = callSet
return client
}
func (rc *LClient) Bind(server IServer){
}

rpc/lserver.go (new file, 291 lines)

@@ -0,0 +1,291 @@
package rpc
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"reflect"
"time"
"strings"
"fmt"
)
type BaseServer struct {
localNodeId string
compressBytesLen int
rpcHandleFinder RpcHandleFinder
iServer IServer
}
func (ls *BaseServer) initBaseServer(compressBytesLen int,rpcHandleFinder RpcHandleFinder){
ls.compressBytesLen = compressBytesLen
ls.rpcHandleFinder = rpcHandleFinder
}
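// myselfRpcHandlerGo invokes a method on the caller's own handler directly via CallMethod, without queueing a request.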
func (ls *BaseServer) myselfRpcHandlerGo(client *Client,handlerName string, serviceMethod string, args interface{},callBack reflect.Value, reply interface{}) error {
rpcHandler := ls.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error("service method not config",log.String("serviceMethod",serviceMethod))
return err
}
return rpcHandler.CallMethod(client,serviceMethod, args,callBack, reply)
}
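// selfNodeRpcHandlerGo queues a request to another handler on the same node, deep-copying the input through the processor's Clone so caller and callee do not share it.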
func (ls *BaseServer) selfNodeRpcHandlerGo(timeout time.Duration,processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
pCall := MakeCall()
pCall.Seq = client.generateSeq()
pCall.TimeOut = timeout
pCall.ServiceMethod = serviceMethod
rpcHandler := ls.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error("service method not config",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
pCall.Seq = 0
pCall.DoError(err)
return pCall
}
var iParam interface{}
if processor == nil {
_, processor = GetProcessorType(args)
}
if args != nil {
var err error
iParam,err = processor.Clone(args)
if err != nil {
sErr := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.Error("deep copy inParam is failed",log.String("handlerName",handlerName),log.String("serviceMethod",serviceMethod))
pCall.Seq = 0
pCall.DoError(sErr)
return pCall
}
}
req := MakeRpcRequest(processor, 0, rpcMethodId, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
if rawArgs != nil {
var err error
req.inParam, err = rpcHandler.UnmarshalInParam(processor, serviceMethod, rpcMethodId, rawArgs)
if err != nil {
log.Error("unmarshalInParam is failed",log.String("serviceMethod",serviceMethod),log.Uint32("rpcMethodId",rpcMethodId),log.ErrorAttr("error",err))
pCall.Seq = 0
pCall.DoError(err)
ReleaseRpcRequest(req)
return pCall
}
}
if noReply == false {
client.AddPending(pCall)
callSeq := pCall.Seq
req.requestHandle = func(Returns interface{}, Err RpcError) {
if reply != nil && Returns != reply && Returns != nil {
byteReturns, err := req.rpcProcessor.Marshal(Returns)
if err != nil {
Err = ConvertError(err)
log.Error("returns data cannot be marshal",log.Uint64("seq",callSeq),log.ErrorAttr("error",err))
}else{
err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
Err = ConvertError(err)
log.Error("returns data cannot be Unmarshal",log.Uint64("seq",callSeq),log.ErrorAttr("error",err))
}
}
}
ReleaseRpcRequest(req)
v := client.RemovePending(callSeq)
if v == nil {
log.Error("rpcClient cannot find seq",log.Uint64("seq",callSeq))
return
}
if len(Err) == 0 {
v.Err = nil
v.DoOK()
} else {
log.Error(Err.Error())
v.DoError(Err)
}
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
log.Error(err.Error())
pCall.DoError(err)
ReleaseRpcRequest(req)
}
return pCall
}
func (server *BaseServer) selfNodeRpcHandlerAsyncGo(timeout time.Duration,client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value,cancelable bool) (CancelRpc,error) {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.Error(err.Error())
return emptyCancelRpc,err
}
_, processor := GetProcessorType(args)
iParam,err := processor.Clone(args)
if err != nil {
errM := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.Error(errM.Error())
return emptyCancelRpc,errM
}
req := MakeRpcRequest(processor, 0, 0, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
cancelRpc := emptyCancelRpc
var callSeq uint64
if noReply == false {
callSeq = client.generateSeq()
pCall := MakeCall()
pCall.Seq = callSeq
pCall.rpcHandler = callerRpcHandler
pCall.callback = &callback
pCall.Reply = reply
pCall.ServiceMethod = serviceMethod
pCall.TimeOut = timeout
client.AddPending(pCall)
rpcCancel := RpcCancel{CallSeq: callSeq,Cli: client}
cancelRpc = rpcCancel.CancelRpc
req.requestHandle = func(Returns interface{}, Err RpcError) {
v := client.RemovePending(callSeq)
if v == nil {
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
v.Err = nil
} else {
v.Err = Err
}
if Returns != nil {
v.Reply = Returns
}
v.rpcHandler.PushRpcResponse(v)
ReleaseRpcRequest(req)
}
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
if callSeq > 0 {
client.RemovePending(callSeq)
}
return emptyCancelRpc,err
}
return cancelRpc,nil
}
func (bs *BaseServer) processRpcRequest(data []byte,connTag string,wrResponse writeResponse) error{
bCompress := (data[0]>>7) > 0
processor := GetProcessor(data[0]&0x7f)
if processor == nil {
return errors.New("cannot find processor")
}
// parse the header
var compressBuff []byte
byteData := data[1:]
if bCompress == true {
var unCompressErr error
compressBuff,unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr!= nil {
return errors.New("UncompressBlock failed")
}
byteData = compressBuff
}
req := MakeRpcRequest(processor, 0, 0, "", false, nil)
err := processor.Unmarshal(byteData, req.RpcRequestData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
if err != nil {
if req.RpcRequestData.GetSeq() > 0 {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor,connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
}
ReleaseRpcRequest(req)
return err
}
// hand off to the handler for processing
serviceMethod := strings.Split(req.RpcRequestData.GetServiceMethod(), ".")
if len(serviceMethod) < 1 {
rpcError := RpcError("rpc request req.ServiceMethod is error")
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor,connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
log.Error("rpc request req.ServiceMethod is error")
return nil
}
rpcHandler := bs.rpcHandleFinder.FindRpcHandler(serviceMethod[0])
if rpcHandler == nil {
rpcError := RpcError(fmt.Sprintf("service method %s is not configured", req.RpcRequestData.GetServiceMethod()))
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor,connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
log.Error("serviceMethod not config",log.String("serviceMethod",req.RpcRequestData.GetServiceMethod()))
ReleaseRpcRequest(req)
return nil
}
if req.RpcRequestData.IsNoReply() == false {
req.requestHandle = func(Returns interface{}, Err RpcError) {
wrResponse(processor,connTag, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), Returns, Err)
ReleaseRpcRequest(req)
}
}
req.inParam, err = rpcHandler.UnmarshalInParam(req.rpcProcessor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetRpcMethodId(), req.RpcRequestData.GetInParam())
if err != nil {
rErr := "Call Rpc " + req.RpcRequestData.GetServiceMethod() + " Param error " + err.Error()
log.Error("call rpc param error",log.String("serviceMethod",req.RpcRequestData.GetServiceMethod()),log.ErrorAttr("error",err))
if req.requestHandle != nil {
req.requestHandle(nil, RpcError(rErr))
} else {
ReleaseRpcRequest(req)
}
return nil
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() == false {
wrResponse(processor, connTag,req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
}
return nil
}
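As a note on the wire framing handled by processRpcRequest above: the first byte of every payload packs the processor type into its low 7 bits and a compression flag into its high bit. The helpers below are a minimal standalone sketch of that header byte; encodeHeader and decodeHeader are made-up names for illustration and are not part of the package.
package main
import "fmt"
// encodeHeader packs a 7-bit processor type and a compression flag
// into the single byte that precedes the RPC payload.
func encodeHeader(processorType uint8, compressed bool) byte {
	b := processorType & 0x7f // low 7 bits: processor type
	if compressed {
		b |= 1 << 7 // high bit: payload is compressed
	}
	return b
}
// decodeHeader reverses encodeHeader, mirroring the checks at the top of processRpcRequest.
func decodeHeader(b byte) (processorType uint8, compressed bool) {
	return b & 0x7f, (b >> 7) > 0
}
func main() {
	h := encodeHeader(1, true)
	pt, compressed := decodeHeader(h)
	fmt.Println(pt, compressed) // 1 true
}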

673
rpc/messagequeue.pb.go Normal file

@@ -0,0 +1,673 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.24.0
// source: rpcproto/messagequeue.proto
package rpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SubscribeType int32
const (
SubscribeType_Subscribe SubscribeType = 0
SubscribeType_Unsubscribe SubscribeType = 1
)
// Enum value maps for SubscribeType.
var (
SubscribeType_name = map[int32]string{
0: "Subscribe",
1: "Unsubscribe",
}
SubscribeType_value = map[string]int32{
"Subscribe": 0,
"Unsubscribe": 1,
}
)
func (x SubscribeType) Enum() *SubscribeType {
p := new(SubscribeType)
*p = x
return p
}
func (x SubscribeType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (SubscribeType) Descriptor() protoreflect.EnumDescriptor {
return file_rpcproto_messagequeue_proto_enumTypes[0].Descriptor()
}
func (SubscribeType) Type() protoreflect.EnumType {
return &file_rpcproto_messagequeue_proto_enumTypes[0]
}
func (x SubscribeType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SubscribeType.Descriptor instead.
func (SubscribeType) EnumDescriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{0}
}
type SubscribeMethod int32
const (
SubscribeMethod_Method_Custom SubscribeMethod = 0 // Custom mode: fetch/subscribe starting from the StartIndex set by the consumer
SubscribeMethod_Method_Last SubscribeMethod = 1 // Last mode: subscribe from the position last recorded for this consumer
)
// Enum value maps for SubscribeMethod.
var (
SubscribeMethod_name = map[int32]string{
0: "Method_Custom",
1: "Method_Last",
}
SubscribeMethod_value = map[string]int32{
"Method_Custom": 0,
"Method_Last": 1,
}
)
func (x SubscribeMethod) Enum() *SubscribeMethod {
p := new(SubscribeMethod)
*p = x
return p
}
func (x SubscribeMethod) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (SubscribeMethod) Descriptor() protoreflect.EnumDescriptor {
return file_rpcproto_messagequeue_proto_enumTypes[1].Descriptor()
}
func (SubscribeMethod) Type() protoreflect.EnumType {
return &file_rpcproto_messagequeue_proto_enumTypes[1]
}
func (x SubscribeMethod) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SubscribeMethod.Descriptor instead.
func (SubscribeMethod) EnumDescriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{1}
}
type DBQueuePopReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
CustomerId string `protobuf:"bytes,1,opt,name=CustomerId,proto3" json:"CustomerId,omitempty"`
QueueName string `protobuf:"bytes,2,opt,name=QueueName,proto3" json:"QueueName,omitempty"`
PopStartPos int32 `protobuf:"varint,3,opt,name=PopStartPos,proto3" json:"PopStartPos,omitempty"`
PopNum int32 `protobuf:"varint,4,opt,name=PopNum,proto3" json:"PopNum,omitempty"`
PushData []byte `protobuf:"bytes,5,opt,name=pushData,proto3" json:"pushData,omitempty"`
}
func (x *DBQueuePopReq) Reset() {
*x = DBQueuePopReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_messagequeue_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DBQueuePopReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DBQueuePopReq) ProtoMessage() {}
func (x *DBQueuePopReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_messagequeue_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBQueuePopReq.ProtoReflect.Descriptor instead.
func (*DBQueuePopReq) Descriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{0}
}
func (x *DBQueuePopReq) GetCustomerId() string {
if x != nil {
return x.CustomerId
}
return ""
}
func (x *DBQueuePopReq) GetQueueName() string {
if x != nil {
return x.QueueName
}
return ""
}
func (x *DBQueuePopReq) GetPopStartPos() int32 {
if x != nil {
return x.PopStartPos
}
return 0
}
func (x *DBQueuePopReq) GetPopNum() int32 {
if x != nil {
return x.PopNum
}
return 0
}
func (x *DBQueuePopReq) GetPushData() []byte {
if x != nil {
return x.PushData
}
return nil
}
type DBQueuePopRes struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
QueueName string `protobuf:"bytes,1,opt,name=QueueName,proto3" json:"QueueName,omitempty"`
PushData [][]byte `protobuf:"bytes,2,rep,name=pushData,proto3" json:"pushData,omitempty"`
}
func (x *DBQueuePopRes) Reset() {
*x = DBQueuePopRes{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_messagequeue_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DBQueuePopRes) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DBQueuePopRes) ProtoMessage() {}
func (x *DBQueuePopRes) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_messagequeue_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBQueuePopRes.ProtoReflect.Descriptor instead.
func (*DBQueuePopRes) Descriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{1}
}
func (x *DBQueuePopRes) GetQueueName() string {
if x != nil {
return x.QueueName
}
return ""
}
func (x *DBQueuePopRes) GetPushData() [][]byte {
if x != nil {
return x.PushData
}
return nil
}
// Subscription
type DBQueueSubscribeReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SubType SubscribeType `protobuf:"varint,1,opt,name=SubType,proto3,enum=SubscribeType" json:"SubType,omitempty"` // subscription type
Method SubscribeMethod `protobuf:"varint,2,opt,name=Method,proto3,enum=SubscribeMethod" json:"Method,omitempty"` // subscription method
CustomerId string `protobuf:"bytes,3,opt,name=CustomerId,proto3" json:"CustomerId,omitempty"` // consumer id
FromNodeId string `protobuf:"bytes,4,opt,name=FromNodeId,proto3" json:"FromNodeId,omitempty"`
RpcMethod string `protobuf:"bytes,5,opt,name=RpcMethod,proto3" json:"RpcMethod,omitempty"`
TopicName string `protobuf:"bytes,6,opt,name=TopicName,proto3" json:"TopicName,omitempty"` // topic name
StartIndex uint64 `protobuf:"varint,7,opt,name=StartIndex,proto3" json:"StartIndex,omitempty"` // start position: the high 4 bytes are a timestamp in seconds and the rest is a sequence number; if set to 0 the service rewrites it as (current time in seconds, high 4 bytes) | (zeros, low 4 bytes)
OneBatchQuantity int32 `protobuf:"varint,8,opt,name=OneBatchQuantity,proto3" json:"OneBatchQuantity,omitempty"` // number of entries pushed per batch; defaults to 1000 when unset
}
func (x *DBQueueSubscribeReq) Reset() {
*x = DBQueueSubscribeReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_messagequeue_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DBQueueSubscribeReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DBQueueSubscribeReq) ProtoMessage() {}
func (x *DBQueueSubscribeReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_messagequeue_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBQueueSubscribeReq.ProtoReflect.Descriptor instead.
func (*DBQueueSubscribeReq) Descriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{2}
}
func (x *DBQueueSubscribeReq) GetSubType() SubscribeType {
if x != nil {
return x.SubType
}
return SubscribeType_Subscribe
}
func (x *DBQueueSubscribeReq) GetMethod() SubscribeMethod {
if x != nil {
return x.Method
}
return SubscribeMethod_Method_Custom
}
func (x *DBQueueSubscribeReq) GetCustomerId() string {
if x != nil {
return x.CustomerId
}
return ""
}
func (x *DBQueueSubscribeReq) GetFromNodeId() string {
if x != nil {
return x.FromNodeId
}
return ""
}
func (x *DBQueueSubscribeReq) GetRpcMethod() string {
if x != nil {
return x.RpcMethod
}
return ""
}
func (x *DBQueueSubscribeReq) GetTopicName() string {
if x != nil {
return x.TopicName
}
return ""
}
func (x *DBQueueSubscribeReq) GetStartIndex() uint64 {
if x != nil {
return x.StartIndex
}
return 0
}
func (x *DBQueueSubscribeReq) GetOneBatchQuantity() int32 {
if x != nil {
return x.OneBatchQuantity
}
return 0
}
type DBQueueSubscribeRes struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DBQueueSubscribeRes) Reset() {
*x = DBQueueSubscribeRes{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_messagequeue_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DBQueueSubscribeRes) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DBQueueSubscribeRes) ProtoMessage() {}
func (x *DBQueueSubscribeRes) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_messagequeue_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBQueueSubscribeRes.ProtoReflect.Descriptor instead.
func (*DBQueueSubscribeRes) Descriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{3}
}
type DBQueuePublishReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
TopicName string `protobuf:"bytes,1,opt,name=TopicName,proto3" json:"TopicName,omitempty"` // topic name
PushData [][]byte `protobuf:"bytes,2,rep,name=pushData,proto3" json:"pushData,omitempty"`
}
func (x *DBQueuePublishReq) Reset() {
*x = DBQueuePublishReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_messagequeue_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DBQueuePublishReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DBQueuePublishReq) ProtoMessage() {}
func (x *DBQueuePublishReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_messagequeue_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBQueuePublishReq.ProtoReflect.Descriptor instead.
func (*DBQueuePublishReq) Descriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{4}
}
func (x *DBQueuePublishReq) GetTopicName() string {
if x != nil {
return x.TopicName
}
return ""
}
func (x *DBQueuePublishReq) GetPushData() [][]byte {
if x != nil {
return x.PushData
}
return nil
}
type DBQueuePublishRes struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DBQueuePublishRes) Reset() {
*x = DBQueuePublishRes{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_messagequeue_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DBQueuePublishRes) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DBQueuePublishRes) ProtoMessage() {}
func (x *DBQueuePublishRes) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_messagequeue_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBQueuePublishRes.ProtoReflect.Descriptor instead.
func (*DBQueuePublishRes) Descriptor() ([]byte, []int) {
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{5}
}
var File_rpcproto_messagequeue_proto protoreflect.FileDescriptor
var file_rpcproto_messagequeue_proto_rawDesc = []byte{
0x0a, 0x1b, 0x72, 0x70, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x01,
0x0a, 0x0d, 0x44, 0x42, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12,
0x1e, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12,
0x1c, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a,
0x0b, 0x50, 0x6f, 0x70, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01,
0x28, 0x05, 0x52, 0x0b, 0x50, 0x6f, 0x70, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x12,
0x16, 0x0a, 0x06, 0x50, 0x6f, 0x70, 0x4e, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52,
0x06, 0x50, 0x6f, 0x70, 0x4e, 0x75, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x75, 0x73, 0x68, 0x44,
0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x75, 0x73, 0x68, 0x44,
0x61, 0x74, 0x61, 0x22, 0x49, 0x0a, 0x0d, 0x44, 0x42, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x6f,
0x70, 0x52, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61,
0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x75, 0x73, 0x68, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02,
0x20, 0x03, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x75, 0x73, 0x68, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb1,
0x02, 0x0a, 0x13, 0x44, 0x42, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x12, 0x28, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65,
0x12, 0x28, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
0x32, 0x10, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x68,
0x6f, 0x64, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x75,
0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x72,
0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x70,
0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x52,
0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x6f, 0x70, 0x69,
0x63, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x54, 0x6f, 0x70,
0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x53, 0x74, 0x61, 0x72,
0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x10, 0x4f, 0x6e, 0x65, 0x42, 0x61, 0x74,
0x63, 0x68, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05,
0x52, 0x10, 0x4f, 0x6e, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69,
0x74, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x42, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x75, 0x62,
0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x44, 0x42, 0x51,
0x75, 0x65, 0x75, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x12, 0x1c,
0x0a, 0x09, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08,
0x70, 0x75, 0x73, 0x68, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x08,
0x70, 0x75, 0x73, 0x68, 0x44, 0x61, 0x74, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x44, 0x42, 0x51, 0x75,
0x65, 0x75, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x2a, 0x2f, 0x0a,
0x0d, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x10, 0x00, 0x12, 0x0f, 0x0a,
0x0b, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x10, 0x01, 0x2a, 0x35,
0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f,
0x64, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x43, 0x75, 0x73, 0x74,
0x6f, 0x6d, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x4c,
0x61, 0x73, 0x74, 0x10, 0x01, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x3b, 0x72, 0x70, 0x63, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_rpcproto_messagequeue_proto_rawDescOnce sync.Once
file_rpcproto_messagequeue_proto_rawDescData = file_rpcproto_messagequeue_proto_rawDesc
)
func file_rpcproto_messagequeue_proto_rawDescGZIP() []byte {
file_rpcproto_messagequeue_proto_rawDescOnce.Do(func() {
file_rpcproto_messagequeue_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpcproto_messagequeue_proto_rawDescData)
})
return file_rpcproto_messagequeue_proto_rawDescData
}
var file_rpcproto_messagequeue_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_rpcproto_messagequeue_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_rpcproto_messagequeue_proto_goTypes = []interface{}{
(SubscribeType)(0), // 0: SubscribeType
(SubscribeMethod)(0), // 1: SubscribeMethod
(*DBQueuePopReq)(nil), // 2: DBQueuePopReq
(*DBQueuePopRes)(nil), // 3: DBQueuePopRes
(*DBQueueSubscribeReq)(nil), // 4: DBQueueSubscribeReq
(*DBQueueSubscribeRes)(nil), // 5: DBQueueSubscribeRes
(*DBQueuePublishReq)(nil), // 6: DBQueuePublishReq
(*DBQueuePublishRes)(nil), // 7: DBQueuePublishRes
}
var file_rpcproto_messagequeue_proto_depIdxs = []int32{
0, // 0: DBQueueSubscribeReq.SubType:type_name -> SubscribeType
1, // 1: DBQueueSubscribeReq.Method:type_name -> SubscribeMethod
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_rpcproto_messagequeue_proto_init() }
func file_rpcproto_messagequeue_proto_init() {
if File_rpcproto_messagequeue_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_rpcproto_messagequeue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePopReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_messagequeue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePopRes); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_messagequeue_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueueSubscribeReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_messagequeue_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueueSubscribeRes); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_messagequeue_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePublishReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_messagequeue_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePublishRes); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_rpcproto_messagequeue_proto_rawDesc,
NumEnums: 2,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_rpcproto_messagequeue_proto_goTypes,
DependencyIndexes: file_rpcproto_messagequeue_proto_depIdxs,
EnumInfos: file_rpcproto_messagequeue_proto_enumTypes,
MessageInfos: file_rpcproto_messagequeue_proto_msgTypes,
}.Build()
File_rpcproto_messagequeue_proto = out.File
file_rpcproto_messagequeue_proto_rawDesc = nil
file_rpcproto_messagequeue_proto_goTypes = nil
file_rpcproto_messagequeue_proto_depIdxs = nil
}

51
rpc/messagequeue.proto Normal file

@@ -0,0 +1,51 @@
syntax = "proto3";
option go_package = ".;rpc";
message DBQueuePopReq {
string CustomerId = 1;
string QueueName = 2;
int32 PopStartPos = 3;
int32 PopNum = 4;
bytes pushData = 5;
}
message DBQueuePopRes {
string QueueName = 1;
repeated bytes pushData = 2;
}
enum SubscribeType {
Subscribe = 0;
Unsubscribe = 1;
}
enum SubscribeMethod {
Method_Custom = 0; // Custom mode: fetch/subscribe starting from the StartIndex set by the consumer
Method_Last = 1; // Last mode: subscribe from the position last recorded for this consumer
}
// Subscription
message DBQueueSubscribeReq {
SubscribeType SubType = 1; // subscription type
SubscribeMethod Method = 2; // subscription method
string CustomerId = 3; // consumer id
string FromNodeId = 4;
string RpcMethod = 5;
string TopicName = 6; // topic name
uint64 StartIndex = 7; // start position: the high 4 bytes are a timestamp in seconds and the rest is a sequence number; if set to 0 the service rewrites it as (current time in seconds, high 4 bytes) | (zeros, low 4 bytes)
int32 OneBatchQuantity = 8; // number of entries pushed per batch; defaults to 1000 when unset
}
message DBQueueSubscribeRes {
}
message DBQueuePublishReq {
string TopicName = 1; // topic name
repeated bytes pushData = 2;
}
message DBQueuePublishRes {
}
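To illustrate the subscribe message defined above, a request can be built from the generated Go types and serialized with the standard protobuf runtime. This is only a sketch: the import path follows this repository's module layout, and the CustomerId, TopicName and RpcMethod values are hypothetical.
package main
import (
	"fmt"
	"log"
	"github.com/duanhf2012/origin/v2/rpc"
	"google.golang.org/protobuf/proto"
)
func main() {
	req := &rpc.DBQueueSubscribeReq{
		SubType:          rpc.SubscribeType_Subscribe,
		Method:           rpc.SubscribeMethod_Method_Last, // resume from the last recorded position
		CustomerId:       "customer_1",
		FromNodeId:       "node_1",
		RpcMethod:        "DBQueueService.RPC_OnPush", // hypothetical callback method name
		TopicName:        "player_event",
		StartIndex:       0,    // 0 lets the service choose the start position
		OneBatchQuantity: 1000, // batch size per push
	}
	data, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("DBQueueSubscribeReq encodes to %d bytes\n", len(data))
}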

90
rpc/natsclient.go Normal file

@@ -0,0 +1,90 @@
package rpc
import (
"github.com/duanhf2012/origin/v2/network"
"reflect"
"time"
"github.com/nats-io/nats.go"
"github.com/duanhf2012/origin/v2/log"
)
// Client for cross-node connections
type NatsClient struct {
localNodeId string
notifyEventFun NotifyEventFun
natsConn *nats.Conn
client *Client
}
func (nc *NatsClient) Start(natsConn *nats.Conn) error{
nc.natsConn = natsConn
_,err := nc.natsConn.QueueSubscribe("oc."+nc.localNodeId, "oc",nc.onSubscribe)
return err
}
func (nc *NatsClient) onSubscribe(msg *nats.Msg){
// process the message
nc.client.processRpcResponse(msg.Data)
}
func (nc *NatsClient) SetConn(conn *network.TCPConn){
}
func (nc *NatsClient) Close(waitDone bool){
}
func (nc *NatsClient) Run(){
}
func (nc *NatsClient) OnClose(){
}
func (rc *NatsClient) Bind(server IServer){
s := server.(*NatsServer)
rc.natsConn = s.natsConn
}
func (rc *NatsClient) Go(nodeId string,timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
_, processor := GetProcessorType(args)
InParam, err := processor.Marshal(args)
if err != nil {
log.Error("Marshal is fail",log.ErrorAttr("error",err))
call := MakeCall()
call.DoError(err)
return call
}
return rc.client.rawGo(nodeId,rc,timeout,rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
}
func (rc *NatsClient) RawGo(nodeId string,timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
return rc.client.rawGo(nodeId,rc,timeout,rpcHandler,processor, noReply, rpcMethodId, serviceMethod, rawArgs, reply)
}
func (rc *NatsClient) AsyncCall(nodeId string,timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
cancelRpc,err := rc.client.asyncCall(nodeId,rc,timeout,rpcHandler, serviceMethod, callback, args, replyParam,cancelable)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(replyParam), reflect.ValueOf(err)})
}
return cancelRpc,nil
}
func (rc *NatsClient) WriteMsg (nodeId string,args ...[]byte) error{
buff := make([]byte,0,4096)
for _,ar := range args {
buff = append(buff,ar...)
}
var msg nats.Msg
msg.Subject = "os."+nodeId
msg.Data = buff
msg.Header = nats.Header{}
msg.Header.Set("fnode",rc.localNodeId)
return rc.natsConn.PublishMsg(&msg)
}
func (rc *NatsClient) IsConnected() bool{
return rc.natsConn!=nil && rc.natsConn.Status() == nats.CONNECTED
}
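NatsClient and NatsServer agree on a simple subject convention: requests are published to "os."+nodeId in queue group "os", responses come back on "oc."+nodeId in queue group "oc", and the sender's node id travels in the "fnode" header. The sketch below replays that round trip with the plain NATS client; the node ids are made up and a locally running nats-server with header support is assumed.
package main
import (
	"fmt"
	"time"
	"github.com/nats-io/nats.go"
)
func main() {
	nc, err := nats.Connect(nats.DefaultURL) // assumes a local nats-server
	if err != nil {
		panic(err)
	}
	defer nc.Close()
	// "Server" side: consume requests addressed to nodeB and answer on the caller's response subject.
	nc.QueueSubscribe("os.nodeB", "os", func(msg *nats.Msg) {
		from := msg.Header.Get("fnode")
		nc.Publish("oc."+from, []byte("pong"))
	})
	// "Client" side: listen for responses addressed to nodeA.
	nc.QueueSubscribe("oc.nodeA", "oc", func(msg *nats.Msg) {
		fmt.Println("response:", string(msg.Data))
	})
	// Send a request from nodeA to nodeB, tagging the origin node in the header.
	req := &nats.Msg{Subject: "os.nodeB", Data: []byte("ping"), Header: nats.Header{}}
	req.Header.Set("fnode", "nodeA")
	nc.PublishMsg(req)
	time.Sleep(200 * time.Millisecond) // give the asynchronous handlers time to run
}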

127
rpc/natsserver.go Normal file

@@ -0,0 +1,127 @@
package rpc
import (
"github.com/nats-io/nats.go"
"github.com/duanhf2012/origin/v2/log"
"time"
)
type NatsServer struct {
BaseServer
natsUrl string
natsConn *nats.Conn
NoRandomize bool
nodeSubTopic string
compressBytesLen int
notifyEventFun NotifyEventFun
}
const reconnectWait = 3*time.Second
func (ns *NatsServer) Start() error{
var err error
var options []nats.Option
options = append(options,nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
log.Error("nats is disconnected",log.String("connUrl",nc.ConnectedUrl()))
ns.notifyEventFun(&NatsConnEvent{IsConnect:false})
}))
options = append(options,nats.ConnectHandler(func(nc *nats.Conn) {
log.Info("nats is connected",log.String("connUrl",nc.ConnectedUrl()))
ns.notifyEventFun(&NatsConnEvent{IsConnect:true})
//log.Error("nats is connect",log.String("connUrl",nc.ConnectedUrl()))
}))
options = append(options,nats.ReconnectHandler(func(nc *nats.Conn) {
ns.notifyEventFun(&NatsConnEvent{IsConnect:true})
log.Info("nats is reconnected",log.String("connUrl",nc.ConnectedUrl()))
}))
options = append(options,nats.MaxReconnects(-1))
options = append(options,nats.ReconnectWait(reconnectWait))
if ns.NoRandomize {
options = append(options,nats.DontRandomize())
}
ns.natsConn,err = nats.Connect(ns.natsUrl,options...)
if err != nil {
log.Error("Connect to nats fail",log.String("natsUrl",ns.natsUrl),log.ErrorAttr("err",err))
return err
}
// start the subscription
_,err = ns.natsConn.QueueSubscribe(ns.nodeSubTopic,"os", func(msg *nats.Msg) {
ns.processRpcRequest(msg.Data,msg.Header.Get("fnode"),ns.WriteResponse)
})
return err
}
func (ns *NatsServer) WriteResponse(processor IRpcProcessor, nodeId string,serviceMethod string, seq uint64, reply interface{}, rpcError RpcError){
var mReply []byte
var err error
if reply != nil {
mReply, err = processor.Marshal(reply)
if err != nil {
rpcError = ConvertError(err)
}
}
var rpcResponse RpcResponse
rpcResponse.RpcResponseData = processor.MakeRpcResponse(seq, rpcError, mReply)
bytes, err := processor.Marshal(rpcResponse.RpcResponseData)
defer processor.ReleaseRpcResponse(rpcResponse.RpcResponseData)
if err != nil {
log.Error("mashal RpcResponseData failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
return
}
var compressBuff []byte
bCompress := uint8(0)
if ns.compressBytesLen > 0 && len(bytes) >= ns.compressBytesLen {
compressBuff,err = compressor.CompressBlock(bytes)
if err != nil {
log.Error("CompressBlock failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
return
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
sendData := make([]byte,0,4096)
byteTypeAndCompress := []byte{uint8(processor.GetProcessorType())|bCompress}
sendData = append(sendData,byteTypeAndCompress...)
sendData = append(sendData,bytes...)
err = ns.natsConn.PublishMsg(&nats.Msg{Subject: "oc."+nodeId, Data: sendData})
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
log.Error("WriteMsg error,Rpc return is fail",log.String("nodeId",nodeId),log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
}
}
func (ns *NatsServer) Stop(){
if ns.natsConn != nil {
ns.natsConn.Close()
}
}
func (ns *NatsServer) initServer(natsUrl string, noRandomize bool,localNodeId string,compressBytesLen int,rpcHandleFinder RpcHandleFinder,notifyEventFun NotifyEventFun){
ns.natsUrl = natsUrl
ns.NoRandomize = noRandomize
ns.localNodeId = localNodeId
ns.compressBytesLen = compressBytesLen
ns.notifyEventFun = notifyEventFun
ns.initBaseServer(compressBytesLen,rpcHandleFinder)
ns.nodeSubTopic = "os."+localNodeId // server-side subject
}
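For reference, the resilient-connection options used in Start above can be reproduced on a standalone connection; the URL is the NATS default and the log messages are placeholders, both assumptions of this sketch rather than framework behaviour.
package main
import (
	"log"
	"time"
	"github.com/nats-io/nats.go"
)
func main() {
	nc, err := nats.Connect(nats.DefaultURL,
		nats.MaxReconnects(-1),            // never stop trying to reconnect
		nats.ReconnectWait(3*time.Second), // same wait as the reconnectWait constant above
		nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
			log.Println("nats disconnected:", err)
		}),
		nats.ReconnectHandler(func(nc *nats.Conn) {
			log.Println("nats reconnected to", nc.ConnectedUrl())
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
}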

662
rpc/origindiscover.pb.go Normal file

@@ -0,0 +1,662 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.24.0
// source: rpcproto/origindiscover.proto
package rpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NodeInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId string `protobuf:"bytes,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
ListenAddr string `protobuf:"bytes,2,opt,name=ListenAddr,proto3" json:"ListenAddr,omitempty"`
MaxRpcParamLen uint32 `protobuf:"varint,3,opt,name=MaxRpcParamLen,proto3" json:"MaxRpcParamLen,omitempty"`
Private bool `protobuf:"varint,4,opt,name=Private,proto3" json:"Private,omitempty"`
Retire bool `protobuf:"varint,5,opt,name=Retire,proto3" json:"Retire,omitempty"`
PublicServiceList []string `protobuf:"bytes,6,rep,name=PublicServiceList,proto3" json:"PublicServiceList,omitempty"`
}
func (x *NodeInfo) Reset() {
*x = NodeInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeInfo) ProtoMessage() {}
func (x *NodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
func (*NodeInfo) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{0}
}
func (x *NodeInfo) GetNodeId() string {
if x != nil {
return x.NodeId
}
return ""
}
func (x *NodeInfo) GetListenAddr() string {
if x != nil {
return x.ListenAddr
}
return ""
}
func (x *NodeInfo) GetMaxRpcParamLen() uint32 {
if x != nil {
return x.MaxRpcParamLen
}
return 0
}
func (x *NodeInfo) GetPrivate() bool {
if x != nil {
return x.Private
}
return false
}
func (x *NodeInfo) GetRetire() bool {
if x != nil {
return x.Retire
}
return false
}
func (x *NodeInfo) GetPublicServiceList() []string {
if x != nil {
return x.PublicServiceList
}
return nil
}
// Client->Master
type RegServiceDiscoverReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *RegServiceDiscoverReq) Reset() {
*x = RegServiceDiscoverReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RegServiceDiscoverReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegServiceDiscoverReq) ProtoMessage() {}
func (x *RegServiceDiscoverReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegServiceDiscoverReq.ProtoReflect.Descriptor instead.
func (*RegServiceDiscoverReq) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{1}
}
func (x *RegServiceDiscoverReq) GetNodeInfo() *NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Master->Client
type SubscribeDiscoverNotify struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
MasterNodeId string `protobuf:"bytes,1,opt,name=MasterNodeId,proto3" json:"MasterNodeId,omitempty"`
IsFull bool `protobuf:"varint,2,opt,name=IsFull,proto3" json:"IsFull,omitempty"`
DelNodeId string `protobuf:"bytes,3,opt,name=DelNodeId,proto3" json:"DelNodeId,omitempty"`
NodeInfo []*NodeInfo `protobuf:"bytes,4,rep,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *SubscribeDiscoverNotify) Reset() {
*x = SubscribeDiscoverNotify{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SubscribeDiscoverNotify) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SubscribeDiscoverNotify) ProtoMessage() {}
func (x *SubscribeDiscoverNotify) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SubscribeDiscoverNotify.ProtoReflect.Descriptor instead.
func (*SubscribeDiscoverNotify) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{2}
}
func (x *SubscribeDiscoverNotify) GetMasterNodeId() string {
if x != nil {
return x.MasterNodeId
}
return ""
}
func (x *SubscribeDiscoverNotify) GetIsFull() bool {
if x != nil {
return x.IsFull
}
return false
}
func (x *SubscribeDiscoverNotify) GetDelNodeId() string {
if x != nil {
return x.DelNodeId
}
return ""
}
func (x *SubscribeDiscoverNotify) GetNodeInfo() []*NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Client->Master
type NodeRetireReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo,proto3" json:"nodeInfo,omitempty"`
}
func (x *NodeRetireReq) Reset() {
*x = NodeRetireReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeRetireReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeRetireReq) ProtoMessage() {}
func (x *NodeRetireReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeRetireReq.ProtoReflect.Descriptor instead.
func (*NodeRetireReq) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{3}
}
func (x *NodeRetireReq) GetNodeInfo() *NodeInfo {
if x != nil {
return x.NodeInfo
}
return nil
}
// Master->Client
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *Empty) Reset() {
*x = Empty{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
func (*Empty) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{4}
}
// Client->Master
type Ping struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId string `protobuf:"bytes,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
}
func (x *Ping) Reset() {
*x = Ping{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Ping) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Ping) ProtoMessage() {}
func (x *Ping) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Ping.ProtoReflect.Descriptor instead.
func (*Ping) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{5}
}
func (x *Ping) GetNodeId() string {
if x != nil {
return x.NodeId
}
return ""
}
// Master->Client
type Pong struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
}
func (x *Pong) Reset() {
*x = Pong{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Pong) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Pong) ProtoMessage() {}
func (x *Pong) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Pong.ProtoReflect.Descriptor instead.
func (*Pong) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{6}
}
func (x *Pong) GetOk() bool {
if x != nil {
return x.Ok
}
return false
}
type UnRegServiceDiscoverReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId string `protobuf:"bytes,1,opt,name=NodeId,proto3" json:"NodeId,omitempty"`
}
func (x *UnRegServiceDiscoverReq) Reset() {
*x = UnRegServiceDiscoverReq{}
if protoimpl.UnsafeEnabled {
mi := &file_rpcproto_origindiscover_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UnRegServiceDiscoverReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnRegServiceDiscoverReq) ProtoMessage() {}
func (x *UnRegServiceDiscoverReq) ProtoReflect() protoreflect.Message {
mi := &file_rpcproto_origindiscover_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnRegServiceDiscoverReq.ProtoReflect.Descriptor instead.
func (*UnRegServiceDiscoverReq) Descriptor() ([]byte, []int) {
return file_rpcproto_origindiscover_proto_rawDescGZIP(), []int{7}
}
func (x *UnRegServiceDiscoverReq) GetNodeId() string {
if x != nil {
return x.NodeId
}
return ""
}
var File_rpcproto_origindiscover_proto protoreflect.FileDescriptor
var file_rpcproto_origindiscover_proto_rawDesc = []byte{
0x0a, 0x1d, 0x72, 0x70, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x69, 0x67, 0x69,
0x6e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x03, 0x72, 0x70, 0x63, 0x22, 0xca, 0x01, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x4c,
0x69, 0x73, 0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x4d, 0x61, 0x78,
0x52, 0x70, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x0e, 0x4d, 0x61, 0x78, 0x52, 0x70, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x65,
0x6e, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x52,
0x65, 0x74, 0x69, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x52, 0x65, 0x74,
0x69, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73,
0x74, 0x22, 0x42, 0x0a, 0x15, 0x52, 0x65, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72,
0x70, 0x63, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f, 0x64,
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x9e, 0x01, 0x0a, 0x17, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x79, 0x12, 0x22, 0x0a, 0x0c, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x49,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x4e,
0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x46, 0x75, 0x6c, 0x6c, 0x18,
0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x49, 0x73, 0x46, 0x75, 0x6c, 0x6c, 0x12, 0x1c, 0x0a,
0x09, 0x44, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x09, 0x44, 0x65, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x08, 0x6e,
0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
0x72, 0x70, 0x63, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x3a, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65,
0x74, 0x69, 0x72, 0x65, 0x52, 0x65, 0x71, 0x12, 0x29, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x49,
0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e,
0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e,
0x66, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1e, 0x0a, 0x04, 0x50,
0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x04, 0x50,
0x6f, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
0x02, 0x6f, 0x6b, 0x22, 0x31, 0x0a, 0x17, 0x55, 0x6e, 0x52, 0x65, 0x67, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x16,
0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x3b, 0x72, 0x70, 0x63, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_rpcproto_origindiscover_proto_rawDescOnce sync.Once
file_rpcproto_origindiscover_proto_rawDescData = file_rpcproto_origindiscover_proto_rawDesc
)
func file_rpcproto_origindiscover_proto_rawDescGZIP() []byte {
file_rpcproto_origindiscover_proto_rawDescOnce.Do(func() {
file_rpcproto_origindiscover_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpcproto_origindiscover_proto_rawDescData)
})
return file_rpcproto_origindiscover_proto_rawDescData
}
var file_rpcproto_origindiscover_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_rpcproto_origindiscover_proto_goTypes = []interface{}{
(*NodeInfo)(nil), // 0: rpc.NodeInfo
(*RegServiceDiscoverReq)(nil), // 1: rpc.RegServiceDiscoverReq
(*SubscribeDiscoverNotify)(nil), // 2: rpc.SubscribeDiscoverNotify
(*NodeRetireReq)(nil), // 3: rpc.NodeRetireReq
(*Empty)(nil), // 4: rpc.Empty
(*Ping)(nil), // 5: rpc.Ping
(*Pong)(nil), // 6: rpc.Pong
(*UnRegServiceDiscoverReq)(nil), // 7: rpc.UnRegServiceDiscoverReq
}
var file_rpcproto_origindiscover_proto_depIdxs = []int32{
0, // 0: rpc.RegServiceDiscoverReq.nodeInfo:type_name -> rpc.NodeInfo
0, // 1: rpc.SubscribeDiscoverNotify.nodeInfo:type_name -> rpc.NodeInfo
0, // 2: rpc.NodeRetireReq.nodeInfo:type_name -> rpc.NodeInfo
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_rpcproto_origindiscover_proto_init() }
func file_rpcproto_origindiscover_proto_init() {
if File_rpcproto_origindiscover_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_rpcproto_origindiscover_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegServiceDiscoverReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SubscribeDiscoverNotify); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeRetireReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Ping); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Pong); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_rpcproto_origindiscover_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UnRegServiceDiscoverReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_rpcproto_origindiscover_proto_rawDesc,
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_rpcproto_origindiscover_proto_goTypes,
DependencyIndexes: file_rpcproto_origindiscover_proto_depIdxs,
MessageInfos: file_rpcproto_origindiscover_proto_msgTypes,
}.Build()
File_rpcproto_origindiscover_proto = out.File
file_rpcproto_origindiscover_proto_rawDesc = nil
file_rpcproto_origindiscover_proto_goTypes = nil
file_rpcproto_origindiscover_proto_depIdxs = nil
}

49
rpc/origindiscover.proto Normal file

@@ -0,0 +1,49 @@
syntax = "proto3";
package rpc;
option go_package = ".;rpc";
message NodeInfo{
string NodeId = 1;
string ListenAddr = 2;
uint32 MaxRpcParamLen = 3;
bool Private = 4;
bool Retire = 5;
repeated string PublicServiceList = 6;
}
//Client->Master
message RegServiceDiscoverReq{
NodeInfo nodeInfo = 1;
}
//Master->Client
message SubscribeDiscoverNotify{
string MasterNodeId = 1;
bool IsFull = 2;
string DelNodeId = 3;
repeated NodeInfo nodeInfo = 4;
}
//Client->Master
message NodeRetireReq{
NodeInfo nodeInfo = 1;
}
//Master->Client
message Empty{
}
//Client->Master
message Ping{
string NodeId = 1;
}
//Master->Client
message Pong{
bool ok = 1;
}
message UnRegServiceDiscoverReq{
string NodeId = 1;
}
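As a quick illustration of the discovery messages above, a registration request can be assembled and serialized as follows; the import path follows this repository's module layout, and the node id, listen address and service list are hypothetical values.
package main
import (
	"fmt"
	"log"
	"github.com/duanhf2012/origin/v2/rpc"
	"google.golang.org/protobuf/proto"
)
func main() {
	req := &rpc.RegServiceDiscoverReq{
		NodeInfo: &rpc.NodeInfo{
			NodeId:            "node_1",
			ListenAddr:        "127.0.0.1:9001",
			MaxRpcParamLen:    4096,
			Private:           false,
			Retire:            false,
			PublicServiceList: []string{"TestService"}, // hypothetical public service
		},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("RegServiceDiscoverReq encodes to %d bytes\n", len(data))
}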

106
rpc/pbprocessor.go Normal file

@@ -0,0 +1,106 @@
package rpc
import (
"github.com/duanhf2012/origin/v2/util/sync"
"google.golang.org/protobuf/proto"
"fmt"
)
type PBProcessor struct {
}
var rpcPbResponseDataPool =sync.NewPool(make(chan interface{},10240), func()interface{}{
return &PBRpcResponseData{}
})
var rpcPbRequestDataPool =sync.NewPool(make(chan interface{},10240), func()interface{}{
return &PBRpcRequestData{}
})
func (slf *PBRpcRequestData) MakeRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) *PBRpcRequestData{
slf.Seq = seq
slf.RpcMethodId = rpcMethodId
slf.ServiceMethod = serviceMethod
slf.NoReply = noReply
slf.InParam = inParam
return slf
}
func (slf *PBRpcResponseData) MakeRespone(seq uint64,err RpcError,reply []byte) *PBRpcResponseData{
slf.Seq = seq
slf.Error = err.Error()
slf.Reply = reply
return slf
}
func (slf *PBProcessor) Marshal(v interface{}) ([]byte, error){
return proto.Marshal(v.(proto.Message))
}
func (slf *PBProcessor) Unmarshal(data []byte, msg interface{}) error{
protoMsg,ok := msg.(proto.Message)
if ok == false {
return fmt.Errorf("%+v is not of proto.Message type",msg)
}
return proto.Unmarshal(data, protoMsg)
}
func (slf *PBProcessor) MakeRpcRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) IRpcRequestData{
pGogoPbRpcRequestData := rpcPbRequestDataPool.Get().(*PBRpcRequestData)
pGogoPbRpcRequestData.MakeRequest(seq,rpcMethodId,serviceMethod,noReply,inParam)
return pGogoPbRpcRequestData
}
func (slf *PBProcessor) MakeRpcResponse(seq uint64,err RpcError,reply []byte) IRpcResponseData {
pPBRpcResponseData := rpcPbResponseDataPool.Get().(*PBRpcResponseData)
pPBRpcResponseData.MakeRespone(seq,err,reply)
return pPBRpcResponseData
}
func (slf *PBProcessor) ReleaseRpcRequest(rpcRequestData IRpcRequestData){
rpcPbRequestDataPool.Put(rpcRequestData)
}
func (slf *PBProcessor) ReleaseRpcResponse(rpcResponseData IRpcResponseData){
rpcPbResponseDataPool.Put(rpcResponseData)
}
func (slf *PBProcessor) IsParse(param interface{}) bool {
_,ok := param.(proto.Message)
return ok
}
func (slf *PBProcessor) GetProcessorType() RpcProcessorType{
return RpcProcessorPB
}
func (slf *PBProcessor) Clone(src interface{}) (interface{},error){
srcMsg,ok := src.(proto.Message)
if ok == false {
return nil,fmt.Errorf("param is not of proto.message type")
}
return proto.Clone(srcMsg),nil
}
func (slf *PBRpcRequestData) IsNoReply() bool{
return slf.GetNoReply()
}
func (slf *PBRpcResponseData) GetErr() *RpcError {
if slf.GetError() == "" {
return nil
}
err := RpcError(slf.GetError())
return &err
}
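A short round trip through PBProcessor shows how requests are pooled and (de)serialized. The sketch reuses rpc.Ping from the discovery proto as an arbitrary payload; the import path follows this repository's module layout and the service method name is made up.
package main
import (
	"fmt"
	"log"
	"github.com/duanhf2012/origin/v2/rpc"
)
func main() {
	var p rpc.PBProcessor
	// Marshal an arbitrary protobuf message to use as the call's input parameter.
	in, err := p.Marshal(&rpc.Ping{NodeId: "node_1"})
	if err != nil {
		log.Fatal(err)
	}
	// Wrap it in a pooled request and serialize the whole envelope.
	req := p.MakeRpcRequest(1, 0, "DiscoveryService.RPC_Ping", false, in) // hypothetical method name
	defer p.ReleaseRpcRequest(req)
	data, err := p.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	// Decode the envelope back and read its fields.
	var decoded rpc.PBRpcRequestData
	if err := p.Unmarshal(data, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetServiceMethod(), decoded.GetSeq())
}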


@@ -1,6 +1,7 @@
package rpc
type IRpcProcessor interface {
Clone(src interface{}) (interface{},error)
Marshal(v interface{}) ([]byte, error) // b is a custom buffer; pass nil and the system allocates one automatically
Unmarshal(data []byte, v interface{}) error
MakeRpcRequest(seq uint64,rpcMethodId uint32,serviceMethod string,noReply bool,inParam []byte) IRpcRequestData

263
rpc/protorpc.pb.go Normal file

@@ -0,0 +1,263 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v3.11.4
// source: test/rpc/protorpc.proto
package rpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PBRpcRequestData struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Seq uint64 `protobuf:"varint,1,opt,name=Seq,proto3" json:"Seq,omitempty"`
RpcMethodId uint32 `protobuf:"varint,2,opt,name=RpcMethodId,proto3" json:"RpcMethodId,omitempty"`
ServiceMethod string `protobuf:"bytes,3,opt,name=ServiceMethod,proto3" json:"ServiceMethod,omitempty"`
NoReply bool `protobuf:"varint,4,opt,name=NoReply,proto3" json:"NoReply,omitempty"`
InParam []byte `protobuf:"bytes,5,opt,name=InParam,proto3" json:"InParam,omitempty"`
}
func (x *PBRpcRequestData) Reset() {
*x = PBRpcRequestData{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_protorpc_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PBRpcRequestData) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PBRpcRequestData) ProtoMessage() {}
func (x *PBRpcRequestData) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_protorpc_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PBRpcRequestData.ProtoReflect.Descriptor instead.
func (*PBRpcRequestData) Descriptor() ([]byte, []int) {
return file_test_rpc_protorpc_proto_rawDescGZIP(), []int{0}
}
func (x *PBRpcRequestData) GetSeq() uint64 {
if x != nil {
return x.Seq
}
return 0
}
func (x *PBRpcRequestData) GetRpcMethodId() uint32 {
if x != nil {
return x.RpcMethodId
}
return 0
}
func (x *PBRpcRequestData) GetServiceMethod() string {
if x != nil {
return x.ServiceMethod
}
return ""
}
func (x *PBRpcRequestData) GetNoReply() bool {
if x != nil {
return x.NoReply
}
return false
}
func (x *PBRpcRequestData) GetInParam() []byte {
if x != nil {
return x.InParam
}
return nil
}
type PBRpcResponseData struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Seq uint64 `protobuf:"varint,1,opt,name=Seq,proto3" json:"Seq,omitempty"`
Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"`
Reply []byte `protobuf:"bytes,3,opt,name=Reply,proto3" json:"Reply,omitempty"`
}
func (x *PBRpcResponseData) Reset() {
*x = PBRpcResponseData{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_protorpc_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PBRpcResponseData) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PBRpcResponseData) ProtoMessage() {}
func (x *PBRpcResponseData) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_protorpc_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PBRpcResponseData.ProtoReflect.Descriptor instead.
func (*PBRpcResponseData) Descriptor() ([]byte, []int) {
return file_test_rpc_protorpc_proto_rawDescGZIP(), []int{1}
}
func (x *PBRpcResponseData) GetSeq() uint64 {
if x != nil {
return x.Seq
}
return 0
}
func (x *PBRpcResponseData) GetError() string {
if x != nil {
return x.Error
}
return ""
}
func (x *PBRpcResponseData) GetReply() []byte {
if x != nil {
return x.Reply
}
return nil
}
var File_test_rpc_protorpc_proto protoreflect.FileDescriptor
var file_test_rpc_protorpc_proto_rawDesc = []byte{
0x0a, 0x17, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x72, 0x70, 0x63, 0x22, 0xa0,
0x01, 0x0a, 0x10, 0x50, 0x42, 0x52, 0x70, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x44,
0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x65, 0x71, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
0x52, 0x03, 0x53, 0x65, 0x71, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68,
0x6f, 0x64, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x52, 0x70, 0x63, 0x4d,
0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x18, 0x0a,
0x07, 0x4e, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x4e, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x49, 0x6e, 0x50, 0x61, 0x72,
0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x49, 0x6e, 0x50, 0x61, 0x72, 0x61,
0x6d, 0x22, 0x51, 0x0a, 0x11, 0x50, 0x42, 0x52, 0x70, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x65, 0x71, 0x18, 0x01, 0x20,
0x01, 0x28, 0x04, 0x52, 0x03, 0x53, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f,
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14,
0x0a, 0x05, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x52,
0x65, 0x70, 0x6c, 0x79, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x3b, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_test_rpc_protorpc_proto_rawDescOnce sync.Once
file_test_rpc_protorpc_proto_rawDescData = file_test_rpc_protorpc_proto_rawDesc
)
func file_test_rpc_protorpc_proto_rawDescGZIP() []byte {
file_test_rpc_protorpc_proto_rawDescOnce.Do(func() {
file_test_rpc_protorpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_rpc_protorpc_proto_rawDescData)
})
return file_test_rpc_protorpc_proto_rawDescData
}
var file_test_rpc_protorpc_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_test_rpc_protorpc_proto_goTypes = []interface{}{
(*PBRpcRequestData)(nil), // 0: rpc.PBRpcRequestData
(*PBRpcResponseData)(nil), // 1: rpc.PBRpcResponseData
}
var file_test_rpc_protorpc_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_test_rpc_protorpc_proto_init() }
func file_test_rpc_protorpc_proto_init() {
if File_test_rpc_protorpc_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_test_rpc_protorpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PBRpcRequestData); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_test_rpc_protorpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PBRpcResponseData); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_test_rpc_protorpc_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_test_rpc_protorpc_proto_goTypes,
DependencyIndexes: file_test_rpc_protorpc_proto_depIdxs,
MessageInfos: file_test_rpc_protorpc_proto_msgTypes,
}.Build()
File_test_rpc_protorpc_proto = out.File
file_test_rpc_protorpc_proto_rawDesc = nil
file_test_rpc_protorpc_proto_goTypes = nil
file_test_rpc_protorpc_proto_depIdxs = nil
}


@@ -1,8 +1,8 @@
syntax = "proto3";
package rpc;
option go_package = "./rpc";
option go_package = ".;rpc";
message GoGoPBRpcRequestData{
message PBRpcRequestData{
uint64 Seq = 1;
uint32 RpcMethodId = 2;
string ServiceMethod = 3;
@@ -10,7 +10,7 @@ message GoGoPBRpcRequestData{
bytes InParam = 5;
}
message GoGoPBRpcResponseData{
message PBRpcResponseData{
uint64 Seq = 1;
string Error = 2;
bytes Reply = 3;

rpc/rank.pb.go Normal file

File diff suppressed because it is too large

rpc/rank.proto Normal file

@@ -0,0 +1,130 @@
syntax = "proto3";
package rpc;
option go_package = ".;rpc";
message SetSortAndExtendData{
bool IsSortData = 1; //whether this is a sort field; when true the Sort field is modified, otherwise the Extend data is modified
int32 Pos = 2; //sort position
int64 Data = 3; //sort value
}
//increment values
message IncreaseRankData {
uint64 RankId = 1; //ID of the rank list
uint64 Key = 2; //primary key of the data
repeated ExtendIncData Extend = 3; //extended data
repeated int64 IncreaseSortData = 4;//sort values to increment
repeated SetSortAndExtendData SetSortAndExtendData = 5;//sort data values to set
bool ReturnRankData = 6; //whether to look up the latest rank; if false the Rank field is not returned
bool InsertDataOnNonExistent = 7; //when true: if the key exists it is not updated, if it does not exist InitData and InitSortData are inserted; when false InitData and InitSortData are ignored
bytes InitData = 8; //data that does not take part in ranking
repeated int64 InitSortData = 9; //data that takes part in ranking
}
message IncreaseRankDataRet{
RankPosData PosData = 1;
}
//used to refresh rank list data on its own
message UpdateRankData {
uint64 RankId = 1; //ID of the rank list
uint64 Key = 2; //primary key of the data
bytes Data = 3; //data payload
}
message UpdateRankDataRet {
bool Ret = 1;
}
// RankPosData rank entry returned by queries
message RankPosData {
uint64 Key = 1; //primary key of the data
uint64 Rank = 2; //rank position
repeated int64 SortData = 3; //data that takes part in ranking
bytes Data = 4; //data that does not take part in ranking
repeated int64 ExtendData = 5; //extended data
}
// RankList rank list definition
message RankList {
uint64 RankId = 1; //rank list type
string RankName = 2; //rank list name
int32 SkipListLevel = 3; //skip list level used for the rank list, e.g. 8/16/32/64
bool IsDec = 4; //whether the ranking is sorted in descending order
uint64 MaxRank = 5; //maximum rank
int64 ExpireMs = 6; //time to live in milliseconds; 0 means never expire
}
// UpsetRankData update rank list data
message UpsetRankData {
uint64 RankId = 1; //ID of the rank list
repeated RankData RankDataList = 2; //rank entries
bool FindNewRank = 3; //whether to look up the latest rank positions
}
message ExtendIncData {
int64 InitValue = 1;
int64 IncreaseValue = 2;
}
// RankData rank entry
message RankData {
uint64 Key = 1; //primary key of the data
repeated int64 SortData = 2; //data that takes part in ranking
bytes Data = 3; //data that does not take part in ranking
repeated ExtendIncData ExData = 4; //extended increment data
}
// DeleteByKey delete rank list data
message DeleteByKey {
uint64 RankId = 1; //category ID of the rank list
repeated uint64 KeyList = 2; //keys of the entries to delete
}
// AddRankList add new rank lists
message AddRankList {
repeated RankList AddList = 1; //rank lists to add
}
// FindRankDataByKey query rank info by key
message FindRankDataByKey {
uint64 RankId = 1; //ID of the rank list
uint64 Key = 2; //key to look up
}
// FindRankDataByRank query rank info by rank position
message FindRankDataByRank {
uint64 RankId = 1; //ID of the rank list
uint64 Rank = 2; //rank position
}
// FindRankDataList query a range of rank entries
message FindRankDataList {
uint64 RankId = 1; //ID of the rank list
uint64 StartRank = 2; //start rank position, starting from 0
uint64 Count = 3; //number of entries to query
uint64 Key = 4; //optionally also query the rank info of this key
}
// RankDataList
message RankDataList {
uint64 RankDataCount = 1; //total number of rank entries
repeated RankPosData RankPosDataList = 2; //rank entries
RankPosData KeyRank = 3; //rank result for the additionally queried Key
}
message RankInfo{
uint64 Key = 1;
uint64 Rank = 2;
}
// RankResult
message RankResult {
int32 AddCount = 1;//number of entries added
int32 ModifyCount = 2; //number of entries modified
int32 RemoveCount = 3;//number of entries removed
repeated RankInfo NewRank = 4; //new rank positions; only returned when UpsetRankData.FindNewRank is true
}
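As a rough illustration of how these messages fit together, the sketch below builds an UpsetRankData request for a single entry. It assumes the generated rank.pb.go (suppressed above) follows the standard protoc-gen-go field mapping; the rank list id, key and score are made-up values.

package main

import (
    "fmt"

    "github.com/duanhf2012/origin/v2/rpc" // assumed import path for the generated rank types
)

func main() {
    // Upsert one rank entry; FindNewRank asks the rank service to return
    // the new positions in RankResult.NewRank.
    upset := &rpc.UpsetRankData{
        RankId: 1, // hypothetical rank list id
        RankDataList: []*rpc.RankData{{
            Key:      10001,                   // primary key of the entry
            SortData: []int64{2500},           // values that take part in ranking
            Data:     []byte(`{"name":"aa"}`), // opaque payload, not ranked
        }},
        FindNewRank: true,
    }
    fmt.Println(upset.String())
}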

rpc/rclient.go Normal file

@@ -0,0 +1,152 @@
package rpc
import (
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"math"
"reflect"
"runtime"
"sync/atomic"
"time"
)
//Client for cross-node connections
type RClient struct {
selfClient *Client
network.TCPClient
conn *network.TCPConn
notifyEventFun NotifyEventFun
}
func (rc *RClient) IsConnected() bool {
rc.Lock()
defer rc.Unlock()
return rc.conn != nil && rc.conn.IsConnected() == true
}
func (rc *RClient) GetConn() *network.TCPConn{
rc.Lock()
conn := rc.conn
rc.Unlock()
return conn
}
func (rc *RClient) SetConn(conn *network.TCPConn){
rc.Lock()
rc.conn = conn
rc.Unlock()
}
func (rc *RClient) WriteMsg (nodeId string,args ...[]byte) error{
return rc.conn.WriteMsg(args...)
}
func (rc *RClient) Go(nodeId string,timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
_, processor := GetProcessorType(args)
InParam, err := processor.Marshal(args)
if err != nil {
log.Error("Marshal is fail",log.ErrorAttr("error",err))
call := MakeCall()
call.DoError(err)
return call
}
return rc.selfClient.rawGo(nodeId,rc,timeout,rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
}
func (rc *RClient) RawGo(nodeId string,timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
return rc.selfClient.rawGo(nodeId,rc,timeout,rpcHandler,processor, noReply, rpcMethodId, serviceMethod, rawArgs, reply)
}
func (rc *RClient) AsyncCall(nodeId string,timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
cancelRpc,err := rc.selfClient.asyncCall(nodeId,rc,timeout,rpcHandler, serviceMethod, callback, args, replyParam,cancelable)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(replyParam), reflect.ValueOf(err)})
}
return cancelRpc,nil
}
func (rc *RClient) Run() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
var eventData RpcConnEvent
eventData.IsConnect = true
eventData.NodeId = rc.selfClient.GetTargetNodeId()
rc.notifyEventFun(&eventData)
for {
bytes, err := rc.conn.ReadMsg()
if err != nil {
log.Error("rclient read msg is failed",log.ErrorAttr("error",err))
return
}
err = rc.selfClient.processRpcResponse(bytes)
rc.conn.ReleaseReadMsg(bytes)
if err != nil {
return
}
}
}
func (rc *RClient) OnClose() {
var connEvent RpcConnEvent
connEvent.IsConnect = false
connEvent.NodeId = rc.selfClient.GetTargetNodeId()
rc.notifyEventFun(&connEvent)
}
func NewRClient(targetNodeId string, addr string, maxRpcParamLen uint32,compressBytesLen int,callSet *CallSet,notifyEventFun NotifyEventFun) *Client{
client := &Client{}
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.targetNodeId = targetNodeId
client.compressBytesLen = compressBytesLen
c:= &RClient{}
c.selfClient = client
c.Addr = addr
c.ConnectInterval = DefaultConnectInterval
c.PendingWriteNum = DefaultMaxPendingWriteNum
c.AutoReconnect = true
c.notifyEventFun = notifyEventFun
c.ConnNum = DefaultRpcConnNum
c.LenMsgLen = DefaultRpcLenMsgLen
c.MinMsgLen = DefaultRpcMinMsgLen
c.ReadDeadline = Default_ReadWriteDeadline
c.WriteDeadline = Default_ReadWriteDeadline
c.LittleEndian = LittleEndian
c.NewAgent = client.NewClientAgent
if maxRpcParamLen > 0 {
c.MaxMsgLen = maxRpcParamLen
} else {
c.MaxMsgLen = math.MaxUint32
}
client.IRealClient = c
client.CallSet = callSet
c.Start()
return client
}
func (rc *RClient) Close(waitDone bool) {
rc.TCPClient.Close(waitDone)
rc.selfClient.cleanPending()
}
func (rc *RClient) Bind(server IServer){
}


@@ -1,7 +1,7 @@
package rpc
import (
"github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/v2/util/sync"
"reflect"
"time"
)
@@ -51,12 +51,6 @@ type IRpcResponseData interface {
GetReply() []byte
}
type IRawInputArgs interface {
GetRawData() []byte //get the raw data
DoFree() //processing finished, reclaim the memory
DoEscape() //escape; the GC reclaims it automatically
}
type RpcHandleFinder interface {
FindRpcHandler(serviceMethod string) IRpcHandler
}
@@ -74,7 +68,16 @@ type Call struct {
connId int
callback *reflect.Value
rpcHandler IRpcHandler
callTime time.Time
TimeOut time.Duration
}
type RpcCancel struct {
Cli *Client
CallSeq uint64
}
func (rc *RpcCancel) CancelRpc(){
rc.Cli.RemovePending(rc.CallSeq)
}
func (slf *RpcRequest) Clear() *RpcRequest{
@@ -108,6 +111,15 @@ func (rpcResponse *RpcResponse) Clear() *RpcResponse{
return rpcResponse
}
func (call *Call) DoError(err error){
call.Err = err
call.done <- call
}
func (call *Call) DoOK(){
call.done <- call
}
func (call *Call) Clear() *Call{
call.Seq = 0
call.ServiceMethod = ""
@@ -121,6 +133,8 @@ func (call *Call) Clear() *Call{
call.connId = 0
call.callback = nil
call.rpcHandler = nil
call.TimeOut = 0
return call
}

rpc/rpcevent.go Normal file

@@ -0,0 +1,23 @@
package rpc
import "github.com/duanhf2012/origin/v2/event"
type NotifyEventFun func (event event.IEvent)
// RpcConnEvent node connection event
type RpcConnEvent struct{
IsConnect bool
NodeId string
}
func (rc *RpcConnEvent) GetEventType() event.EventType{
return event.Sys_Event_Node_Conn_Event
}
type NatsConnEvent struct {
IsConnect bool
}
func (nc *NatsConnEvent) GetEventType() event.EventType{
return event.Sys_Event_Nats_Conn_Event
}
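A small sketch of a NotifyEventFun that reacts to the two event types above; it relies only on the GetEventType methods shown here, and how the callback gets registered is left to the node/cluster code.

package main

import (
    "fmt"

    "github.com/duanhf2012/origin/v2/event"
    "github.com/duanhf2012/origin/v2/rpc"
)

// onRpcEvent logs node and NATS connection changes.
func onRpcEvent(ev event.IEvent) {
    switch e := ev.(type) {
    case *rpc.RpcConnEvent:
        fmt.Printf("node %s connected=%v\n", e.NodeId, e.IsConnect)
    case *rpc.NatsConnEvent:
        fmt.Printf("nats connected=%v\n", e.IsConnect)
    }
}

func main() {
    var fn rpc.NotifyEventFun = onRpcEvent
    fn(&rpc.RpcConnEvent{IsConnect: true, NodeId: "node1"}) // simulate a connect notification
}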


@@ -3,19 +3,21 @@ package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"reflect"
"runtime"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/duanhf2012/origin/v2/event"
"time"
)
const maxClusterNode int = 128
const maxClusterNode int = 32
type FuncRpcClient func(nodeId int, serviceMethod string, client []*Client) (error, int)
type FuncRpcServer func() *Server
type FuncRpcClient func(nodeId string, serviceMethod string,filterRetire bool, client []*Client) (error, []*Client)
type FuncRpcServer func() IServer
const NodeIdNull = ""
var nilError = reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())
@@ -45,10 +47,7 @@ type RpcMethodInfo struct {
rpcProcessorType RpcProcessorType
}
type RawRpcCallBack interface {
Unmarshal(data []byte) (interface{}, error)
CB(data interface{})
}
type RawRpcCallBack func(rawData []byte)
type IRpcHandlerChannel interface {
PushRpcResponse(call *Call) error
@@ -64,15 +63,30 @@ type RpcHandler struct {
funcRpcClient FuncRpcClient
funcRpcServer FuncRpcServer
pClientList []*Client
//pClientList []*Client
}
type TriggerRpcEvent func(bConnect bool, clientSeq uint32, nodeId int)
type IRpcListener interface {
OnNodeConnected(nodeId int)
OnNodeDisconnect(nodeId int)
//type TriggerRpcConnEvent func(bConnect bool, clientSeq uint32, nodeId string)
type NotifyEventToAllService func(event event.IEvent)
type INodeConnListener interface {
OnNodeConnected(nodeId string)
OnNodeDisconnect(nodeId string)
}
type INatsConnListener interface {
OnNatsConnected()
OnNatsDisconnect()
}
type IDiscoveryServiceListener interface {
OnDiscoveryService(nodeId string, serviceName []string)
OnUnDiscoveryService(nodeId string, serviceName []string)
}
type CancelRpc func()
func emptyCancelRpc(){}
type IRpcHandler interface {
IRpcHandlerChannel
GetName() string
@@ -80,17 +94,25 @@ type IRpcHandler interface {
GetRpcHandler() IRpcHandler
HandlerRpcRequest(request *RpcRequest)
HandlerRpcResponseCB(call *Call)
CallMethod(ServiceMethod string, param interface{}, reply interface{}) error
AsyncCall(serviceMethod string, args interface{}, callback interface{}) error
CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error
Call(serviceMethod string, args interface{}, reply interface{}) error
CallNode(nodeId string, serviceMethod string, args interface{}, reply interface{}) error
AsyncCall(serviceMethod string, args interface{}, callback interface{}) error
AsyncCallNode(nodeId string, serviceMethod string, args interface{}, callback interface{}) error
CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error
CallNodeWithTimeout(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, reply interface{}) error
AsyncCallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error)
AsyncCallNodeWithTimeout(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error)
Go(serviceMethod string, args interface{}) error
AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error
CallNode(nodeId int, serviceMethod string, args interface{}, reply interface{}) error
GoNode(nodeId int, serviceMethod string, args interface{}) error
RawGoNode(rpcProcessorType RpcProcessorType, nodeId int, rpcMethodId uint32, serviceName string, rawArgs IRawInputArgs) error
GoNode(nodeId string, serviceMethod string, args interface{}) error
RawGoNode(rpcProcessorType RpcProcessorType, nodeId string, rpcMethodId uint32, serviceName string, rawArgs []byte) error
CastGo(serviceMethod string, args interface{}) error
IsSingleCoroutine() bool
UnmarshalInParam(rpcProcessor IRpcProcessor, serviceMethod string, rawRpcMethodId uint32, inParam []byte) (interface{}, error)
GetRpcServer() FuncRpcServer
}
func reqHandlerNull(Returns interface{}, Err RpcError) {
@@ -113,7 +135,6 @@ func (handler *RpcHandler) InitRpcHandler(rpcHandler IRpcHandler, getClientFun F
handler.mapFunctions = map[string]RpcMethodInfo{}
handler.funcRpcClient = getClientFun
handler.funcRpcServer = getServerFun
handler.pClientList = make([]*Client, maxClusterNode)
handler.RegisterRpc(rpcHandler)
}
@@ -135,7 +156,7 @@ func (handler *RpcHandler) isExportedOrBuiltinType(t reflect.Type) bool {
func (handler *RpcHandler) suitableMethods(method reflect.Method) error {
//only methods whose names start with RPC_ can be called
if strings.Index(method.Name, "RPC_") != 0 {
if strings.Index(method.Name, "RPC_") != 0 && strings.Index(method.Name, "RPC") != 0 {
return nil
}
@@ -201,7 +222,7 @@ func (handler *RpcHandler) HandlerRpcResponseCB(call *Call) {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.SError("core dump info[", errString, "]\n", string(buf[:l]))
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
@@ -223,7 +244,7 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.SError("Handler Rpc ", request.RpcRequestData.GetServiceMethod(), " Core dump info[", errString, "]\n", string(buf[:l]))
log.Dump(string(buf[:l]),log.String("error",errString))
rpcErr := RpcError("call error : core dumps")
if request.requestHandle != nil {
request.requestHandle(nil, rpcErr)
@@ -236,19 +257,24 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
if rawRpcId > 0 {
v, ok := handler.mapRawFunctions[rawRpcId]
if ok == false {
log.SError("RpcHandler cannot find request rpc id", rawRpcId)
log.Error("RpcHandler cannot find request rpc id",log.Uint32("rawRpcId",rawRpcId))
return
}
rawData,ok := request.inParam.([]byte)
if ok == false {
log.Error("RpcHandler cannot convert",log.String("RpcHandlerName",handler.rpcHandler.GetName()),log.Uint32("rawRpcId",rawRpcId))
return
}
v.CB(request.inParam)
v(rawData)
return
}
//ordinary rpc request
v, ok := handler.mapFunctions[request.RpcRequestData.GetServiceMethod()]
if ok == false {
err := "RpcHandler " + handler.rpcHandler.GetName() + "cannot find " + request.RpcRequestData.GetServiceMethod()
log.SError(err)
err := "RpcHandler " + handler.rpcHandler.GetName() + " cannot find " + request.RpcRequestData.GetServiceMethod()
log.Error("HandlerRpcRequest cannot find serviceMethod",log.String("RpcHandlerName",handler.rpcHandler.GetName()),log.String("serviceMethod",request.RpcRequestData.GetServiceMethod()))
if request.requestHandle != nil {
request.requestHandle(nil, RpcError(err))
}
@@ -279,92 +305,154 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
paramList = append(paramList, oParam) //output parameter
} else if request.requestHandle != nil && v.hasResponder == false { //the caller expects a return value, but the called function has no return parameter
rErr := "Call Rpc " + request.RpcRequestData.GetServiceMethod() + " without return parameter!"
log.SError(rErr)
log.Error("call serviceMethod without return parameter",log.String("serviceMethod",request.RpcRequestData.GetServiceMethod()))
request.requestHandle(nil, RpcError(rErr))
return
}
requestHanle := request.requestHandle
returnValues := v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()
if errInter != nil {
err = errInter.(error)
}
if request.requestHandle != nil && v.hasResponder == false {
request.requestHandle(oParam.Interface(), ConvertError(err))
if v.hasResponder == false && requestHanle != nil {
requestHanle(oParam.Interface(), ConvertError(err))
}
}
func (handler *RpcHandler) CallMethod(ServiceMethod string, param interface{}, reply interface{}) error {
func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error {
var err error
v, ok := handler.mapFunctions[ServiceMethod]
if ok == false {
err = errors.New("RpcHandler " + handler.rpcHandler.GetName() + " cannot find" + ServiceMethod)
log.SError(err.Error())
log.Error("CallMethod cannot find serviceMethod",log.String("rpcHandlerName",handler.rpcHandler.GetName()),log.String("serviceMethod",ServiceMethod))
return err
}
var paramList []reflect.Value
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //receiver
paramList = append(paramList, reflect.ValueOf(param))
paramList = append(paramList, reflect.ValueOf(reply)) //output parameter
var returnValues []reflect.Value
var pCall *Call
var callSeq uint64
if v.hasResponder == true {
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //receiver
pCall = MakeCall()
pCall.callback = &callBack
pCall.Seq = client.generateSeq()
callSeq = pCall.Seq
pCall.TimeOut = DefaultRpcTimeout
pCall.ServiceMethod = ServiceMethod
client.AddPending(pCall)
returnValues := v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()
if errInter != nil {
err = errInter.(error)
//when there is a return value
if reply != nil {
//synchronous Call-style invocation
hander :=func(Returns interface{}, Err RpcError) {
rpcCall := client.RemovePending(callSeq)
if rpcCall == nil {
log.Error("cannot find call seq",log.Uint64("seq",callSeq))
return
}
//decode the returned data
if len(Err)!=0 {
rpcCall.Err = Err
}else if Returns != nil {
_, processor := GetProcessorType(Returns)
var bytes []byte
bytes,rpcCall.Err = processor.Marshal(Returns)
if rpcCall.Err == nil {
rpcCall.Err = processor.Unmarshal(bytes,reply)
}
}
//if the call cannot be found, it has already timed out
rpcCall.Reply = reply
rpcCall.done<-rpcCall
}
paramList = append(paramList, reflect.ValueOf(hander))
}else{//no return value: callBack is the empty requestHandlerNull callback
paramList = append(paramList, callBack)
}
paramList = append(paramList, reflect.ValueOf(param))
//invoke the rpc function
returnValues = v.method.Func.Call(paramList)
//check whether the return value is an error; if so, invoke the callback
errInter := returnValues[0].Interface()
if errInter != nil && callBack!=requestHandlerNull{
err = errInter.(error)
callBack.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
}else{
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //receiver
paramList = append(paramList, reflect.ValueOf(param))
//when the called RPC function has a return value
if v.outParamValue.IsValid() {
//RPC call without a reply parameter
if reply == nil {
paramList = append(paramList, reflect.New(v.outParamValue.Type().Elem()))
}else{
//RPC call with a reply parameter
paramList = append(paramList, reflect.ValueOf(reply)) //output parameter
}
}
returnValues = v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()
//only invoke the callback if a real one was provided
if callBack != requestHandlerNull {
valErr := nilError
if errInter != nil {
err = errInter.(error)
valErr = reflect.ValueOf(err)
}
callBack.Call([]reflect.Value{reflect.ValueOf(reply),valErr })
}
}
rpcCall := client.FindPending(callSeq)
if rpcCall!=nil {
err = rpcCall.Done().Err
if rpcCall.callback!= nil {
valErr := nilError
if rpcCall.Err != nil {
valErr = reflect.ValueOf(rpcCall.Err)
}
rpcCall.callback.Call([]reflect.Value{reflect.ValueOf(rpcCall.Reply), valErr})
}
client.RemovePending(rpcCall.Seq)
ReleaseCall(rpcCall)
}
return err
}
func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int, serviceMethod string, args interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
if count == 0 {
func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId string, serviceMethod string, args interface{}) error {
pClientList :=make([]*Client,0,maxClusterNode)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList)
if len(pClientList) == 0 {
if err != nil {
log.SError("Call ", serviceMethod, " is error:", err.Error())
log.Error("call serviceMethod is failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
} else {
log.SError("Can not find ", serviceMethod)
log.Error("cannot find serviceMethod",log.String("serviceMethod",serviceMethod))
}
return err
}
if count > 1 && bCast == false {
log.SError("Cannot call %s more then 1 node!", serviceMethod)
if len(pClientList) > 1 && bCast == false {
log.Error("cannot call serviceMethod more then 1 node",log.String("serviceMethod",serviceMethod))
return errors.New("cannot call more then 1 node")
}
//2. invoke through the rpcClient
//when the call targets a service on the local node
for i := 0; i < count; i++ {
if pClientList[i].bSelfNode == true {
pLocalRpcServer := handler.funcRpcServer()
//check whether it is the same service
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
sErr := errors.New("Call serviceMethod " + serviceMethod + " is error!")
log.SError(sErr.Error())
err = sErr
continue
}
serviceName := serviceMethod[:findIndex]
if serviceName == handler.rpcHandler.GetName() { //calling our own service
//invoke our own rpcHandler processor
return pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceMethod, args, nil)
}
//processor of another rpcHandler
pCall := pLocalRpcServer.selfNodeRpcHandlerGo(processor, pClientList[i], true, serviceName, 0, serviceMethod, args, nil, nil)
if pCall.Err != nil {
err = pCall.Err
}
pClientList[i].RemovePending(pCall.Seq)
ReleaseCall(pCall)
continue
}
//cross-node call
pCall := pClientList[i].Go(true, serviceMethod, args, nil)
for i := 0; i < len(pClientList); i++ {
pCall := pClientList[i].Go(pClientList[i].GetTargetNodeId(),DefaultRpcTimeout,handler.rpcHandler,true, serviceMethod, args, nil)
if pCall.Err != nil {
err = pCall.Err
}
@@ -375,137 +463,76 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int
return err
}
func (handler *RpcHandler) callRpc(nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
func (handler *RpcHandler) callRpc(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, reply interface{}) error {
pClientList :=make([]*Client,0,maxClusterNode)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList)
if err != nil {
log.SError("Call serviceMethod is error:", err.Error())
log.Error("Call serviceMethod is failed",log.ErrorAttr("error",err))
return err
} else if count <= 0 {
} else if len(pClientList) <= 0 {
err = errors.New("Call serviceMethod is error:cannot find " + serviceMethod)
log.SError(err.Error())
log.Error("cannot find serviceMethod",log.String("serviceMethod",serviceMethod))
return err
} else if count > 1 {
log.SError("Cannot call more then 1 node!")
} else if len(pClientList) > 1 {
log.Error("Cannot call more then 1 node!",log.String("serviceMethod",serviceMethod))
return errors.New("cannot call more then 1 node")
}
//2. invoke through the rpcClient
//when the call targets a service on the local node
pClient := pClientList[0]
if pClient.bSelfNode == true {
pLocalRpcServer := handler.funcRpcServer()
//check whether it is the same service
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
err := errors.New("Call serviceMethod " + serviceMethod + "is error!")
log.SError(err.Error())
return err
}
serviceName := serviceMethod[:findIndex]
if serviceName == handler.rpcHandler.GetName() { //calling our own service
//invoke our own rpcHandler processor
return pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceMethod, args, reply)
}
//processor of another rpcHandler
pCall := pLocalRpcServer.selfNodeRpcHandlerGo(nil, pClient, false, serviceName, 0, serviceMethod, args, reply, nil)
err = pCall.Done().Err
pClient.RemovePending(pCall.Seq)
ReleaseCall(pCall)
return err
}
pCall := pClient.Go(pClient.GetTargetNodeId(),timeout,handler.rpcHandler,false, serviceMethod, args, reply)
//cross-node call
pCall := pClient.Go(false, serviceMethod, args, reply)
if pCall.Err != nil {
err = pCall.Err
ReleaseCall(pCall)
return err
}
err = pCall.Done().Err
pClient.RemovePending(pCall.Seq)
ReleaseCall(pCall)
return err
}
func (handler *RpcHandler) asyncCallRpc(nodeId int, serviceMethod string, args interface{}, callback interface{}) error {
func (handler *RpcHandler) asyncCallRpc(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error) {
fVal := reflect.ValueOf(callback)
if fVal.Kind() != reflect.Func {
err := errors.New("call " + serviceMethod + " input callback param is error!")
log.SError(err.Error())
return err
log.Error("input callback param is error",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,err
}
if fVal.Type().NumIn() != 2 {
err := errors.New("call " + serviceMethod + " callback param function is error!")
log.SError(err.Error())
return err
log.Error("callback param function is error",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,err
}
if fVal.Type().In(0).Kind() != reflect.Ptr || fVal.Type().In(1).String() != "error" {
err := errors.New("call " + serviceMethod + " callback param function is error!")
log.SError(err.Error())
return err
log.Error("callback param function is error",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,err
}
reply := reflect.New(fVal.Type().In(0).Elem()).Interface()
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
if count == 0 || err != nil {
strNodeId := strconv.Itoa(nodeId)
pClientList :=make([]*Client,0,1)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList[:])
if len(pClientList) == 0 || err != nil {
if err == nil {
err = errors.New("cannot find rpcClient from nodeId " + strNodeId + " " + serviceMethod)
if nodeId != NodeIdNull {
err = fmt.Errorf("cannot find %s from nodeId %d",serviceMethod,nodeId)
}else {
err = fmt.Errorf("No %s service found in the origin network",serviceMethod)
}
}
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.SError("Call serviceMethod is error:", err.Error())
return nil
log.Error("cannot find serviceMethod from node",log.String("serviceMethod",serviceMethod),log.String("nodeId",nodeId))
return emptyCancelRpc,nil
}
if count > 1 {
if len(pClientList) > 1 {
err := errors.New("cannot call more then 1 node")
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.SError(err.Error())
return nil
log.Error("cannot call more then 1 node",log.String("serviceMethod",serviceMethod))
return emptyCancelRpc,nil
}
//2. invoke through the rpcClient
//when the call targets a service on the local node
pClient := pClientList[0]
if pClient.bSelfNode == true {
pLocalRpcServer := handler.funcRpcServer()
//check whether it is the same service
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
err := errors.New("Call serviceMethod " + serviceMethod + " is error!")
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.SError(err.Error())
return nil
}
serviceName := serviceMethod[:findIndex]
//invoke our own rpcHandler processor
if serviceName == handler.rpcHandler.GetName() { //calling our own service
err := pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceMethod, args, reply)
if err == nil {
fVal.Call([]reflect.Value{reflect.ValueOf(reply), nilError})
} else {
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
}
//processor of another rpcHandler
err = pLocalRpcServer.selfNodeRpcHandlerAsyncGo(pClient, handler, false, serviceName, serviceMethod, args, reply, fVal)
if err != nil {
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
return nil
}
//cross-node call
err = pClient.AsyncCall(handler, serviceMethod, fVal, args, reply)
if err != nil {
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
return nil
return pClientList[0].AsyncCall(pClientList[0].GetTargetNodeId(),timeout,handler.rpcHandler, serviceMethod, fVal, args, reply,false)
}
func (handler *RpcHandler) GetName() string {
@@ -516,79 +543,77 @@ func (handler *RpcHandler) IsSingleCoroutine() bool {
return handler.rpcHandler.IsSingleCoroutine()
}
func (handler *RpcHandler) CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(timeout,NodeIdNull, serviceMethod, args, reply)
}
func (handler *RpcHandler) CallNodeWithTimeout(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, reply interface{}) error{
return handler.callRpc(timeout,nodeId, serviceMethod, args, reply)
}
func (handler *RpcHandler) AsyncCallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error){
return handler.asyncCallRpc(timeout,NodeIdNull, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCallNodeWithTimeout(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error){
return handler.asyncCallRpc(timeout,nodeId, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCall(serviceMethod string, args interface{}, callback interface{}) error {
return handler.asyncCallRpc(0, serviceMethod, args, callback)
_,err := handler.asyncCallRpc(DefaultRpcTimeout,NodeIdNull, serviceMethod, args, callback)
return err
}
func (handler *RpcHandler) Call(serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(0, serviceMethod, args, reply)
return handler.callRpc(DefaultRpcTimeout,NodeIdNull, serviceMethod, args, reply)
}
func (handler *RpcHandler) Go(serviceMethod string, args interface{}) error {
return handler.goRpc(nil, false, 0, serviceMethod, args)
return handler.goRpc(nil, false, NodeIdNull, serviceMethod, args)
}
func (handler *RpcHandler) AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error {
return handler.asyncCallRpc(nodeId, serviceMethod, args, callback)
func (handler *RpcHandler) AsyncCallNode(nodeId string, serviceMethod string, args interface{}, callback interface{}) error {
_,err:= handler.asyncCallRpc(DefaultRpcTimeout,nodeId, serviceMethod, args, callback)
return err
}
func (handler *RpcHandler) CallNode(nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(nodeId, serviceMethod, args, reply)
func (handler *RpcHandler) CallNode(nodeId string, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(DefaultRpcTimeout,nodeId, serviceMethod, args, reply)
}
func (handler *RpcHandler) GoNode(nodeId int, serviceMethod string, args interface{}) error {
func (handler *RpcHandler) GoNode(nodeId string, serviceMethod string, args interface{}) error {
return handler.goRpc(nil, false, nodeId, serviceMethod, args)
}
func (handler *RpcHandler) CastGo(serviceMethod string, args interface{}) error {
return handler.goRpc(nil, true, 0, serviceMethod, args)
return handler.goRpc(nil, true, NodeIdNull, serviceMethod, args)
}
func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId int, rpcMethodId uint32, serviceName string, rawArgs IRawInputArgs) error {
func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId string, rpcMethodId uint32, serviceName string, rawArgs []byte) error {
processor := GetProcessor(uint8(rpcProcessorType))
err, count := handler.funcRpcClient(nodeId, serviceName, handler.pClientList)
if count == 0 || err != nil {
//args.DoGc()
log.SError("Call serviceMethod is error:", err.Error())
pClientList := make([]*Client,0,1)
err, pClientList := handler.funcRpcClient(nodeId, serviceName,false, pClientList)
if len(pClientList) == 0 || err != nil {
log.Error("call serviceMethod is failed",log.ErrorAttr("error",err))
return err
}
if count > 1 {
//args.DoGc()
if len(pClientList) > 1 {
err := errors.New("cannot call more then 1 node")
log.SError(err.Error())
log.Error("cannot call more then 1 node",log.String("serviceName",serviceName))
return err
}
//2. invoke through the rpcClient
//when the call targets a service on the local node
for i := 0; i < count; i++ {
if handler.pClientList[i].bSelfNode == true {
pLocalRpcServer := handler.funcRpcServer()
//invoke our own rpcHandler processor
if serviceName == handler.rpcHandler.GetName() { //calling our own service
err := pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceName, rawArgs.GetRawData(), nil)
//args.DoGc()
return err
}
//processor of another rpcHandler
pCall := pLocalRpcServer.selfNodeRpcHandlerGo(processor, handler.pClientList[i], true, serviceName, rpcMethodId, serviceName, nil, nil, rawArgs.GetRawData())
rawArgs.DoEscape()
if pCall.Err != nil {
err = pCall.Err
}
handler.pClientList[i].RemovePending(pCall.Seq)
ReleaseCall(pCall)
continue
}
for i := 0; i < len(pClientList); i++ {
//cross-node call
pCall := handler.pClientList[i].RawGo(processor, true, rpcMethodId, serviceName, rawArgs.GetRawData(), nil)
rawArgs.DoFree()
pCall := pClientList[i].RawGo(pClientList[i].GetTargetNodeId(),DefaultRpcTimeout,handler.rpcHandler,processor, true, rpcMethodId, serviceName, rawArgs, nil)
if pCall.Err != nil {
err = pCall.Err
}
handler.pClientList[i].RemovePending(pCall.Seq)
pClientList[i].RemovePending(pCall.Seq)
ReleaseCall(pCall)
}
@@ -601,23 +626,7 @@ func (handler *RpcHandler) RegRawRpc(rpcMethodId uint32, rawRpcCB RawRpcCallBack
func (handler *RpcHandler) UnmarshalInParam(rpcProcessor IRpcProcessor, serviceMethod string, rawRpcMethodId uint32, inParam []byte) (interface{}, error) {
if rawRpcMethodId > 0 {
v, ok := handler.mapRawFunctions[rawRpcMethodId]
if ok == false {
strRawRpcMethodId := strconv.FormatUint(uint64(rawRpcMethodId), 10)
err := errors.New("RpcHandler cannot find request rpc id " + strRawRpcMethodId)
log.SError(err.Error())
return nil, err
}
msg, err := v.Unmarshal(inParam)
if err != nil {
strRawRpcMethodId := strconv.FormatUint(uint64(rawRpcMethodId), 10)
err := errors.New("RpcHandler cannot Unmarshal rpc id " + strRawRpcMethodId)
log.SError(err.Error())
return nil, err
}
return msg, err
return inParam,nil
}
v, ok := handler.mapFunctions[serviceMethod]
@@ -630,3 +639,8 @@ func (handler *RpcHandler) UnmarshalInParam(rpcProcessor IRpcProcessor, serviceM
err = rpcProcessor.Unmarshal(inParam, param)
return param, err
}
func (handler *RpcHandler) GetRpcServer() FuncRpcServer{
return handler.funcRpcServer
}
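To make the reworked call surface concrete (string node ids, explicit timeouts, cancellable async calls), here is a hedged fragment showing how caller code might use it; the node id, service method and request/reply types are invented for illustration only.

package example

import (
    "fmt"
    "time"

    "github.com/duanhf2012/origin/v2/rpc" // assumed import path
)

// QueryRankReq/QueryRankRet are stand-in request and reply types for the sketch.
type QueryRankReq struct{ Key uint64 }
type QueryRankRet struct{ Rank uint64 }

// callRankService exercises the timeout and async variants of IRpcHandler.
func callRankService(h rpc.IRpcHandler) error {
    // Synchronous call to a named node with an explicit timeout.
    var ret QueryRankRet
    if err := h.CallNodeWithTimeout(3*time.Second, "rank_node_1", "RankService.RPC_Query", &QueryRankReq{Key: 10001}, &ret); err != nil {
        return err
    }

    // Cancellable asynchronous variant; the returned CancelRpc drops the
    // pending call if the reply is no longer wanted.
    cancel, err := h.AsyncCallNodeWithTimeout(3*time.Second, "rank_node_1", "RankService.RPC_Query",
        &QueryRankReq{Key: 10001},
        func(res *QueryRankRet, err error) {
            fmt.Println(res.Rank, err)
        })
    if err != nil {
        return err
    }
    _ = cancel // call cancel() to abandon the pending reply
    return nil
}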

rpc/rpcnats.go Normal file

@@ -0,0 +1,42 @@
package rpc
import "sync/atomic"
type RpcNats struct {
NatsServer
NatsClient
}
func (rn *RpcNats) Start() error{
err := rn.NatsServer.Start()
if err != nil {
return err
}
return rn.NatsClient.Start(rn.NatsServer.natsConn)
}
func (rn *RpcNats) Init(natsUrl string, noRandomize bool, nodeId string,compressBytesLen int,rpcHandleFinder RpcHandleFinder,notifyEventFun NotifyEventFun){
rn.NatsClient.localNodeId = nodeId
rn.NatsServer.initServer(natsUrl,noRandomize, nodeId,compressBytesLen,rpcHandleFinder,notifyEventFun)
rn.NatsServer.iServer = rn
}
func (rn *RpcNats) NewNatsClient(targetNodeId string,localNodeId string,callSet *CallSet,notifyEventFun NotifyEventFun) *Client{
var client Client
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.targetNodeId = targetNodeId
//client.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
//client.callRpcTimeout = DefaultRpcTimeout
natsClient := &rn.NatsClient
natsClient.localNodeId = localNodeId
natsClient.client = &client
natsClient.notifyEventFun = notifyEventFun
client.IRealClient = natsClient
client.CallSet = callSet
return &client
}
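A hedged sketch of what wiring this module up might look like; in practice the node/cluster layer owns this bootstrap, and the NATS URL, node id and stub RpcHandleFinder below are placeholders only.

package main

import (
    "log"

    "github.com/duanhf2012/origin/v2/event"
    "github.com/duanhf2012/origin/v2/rpc"
)

// stubFinder is a placeholder RpcHandleFinder; a real node resolves service
// names to their rpc handlers here.
type stubFinder struct{}

func (stubFinder) FindRpcHandler(serviceMethod string) rpc.IRpcHandler { return nil }

func main() {
    var rn rpc.RpcNats
    // All arguments are illustrative: NATS URL, noRandomize flag, node id,
    // compression threshold (0 = disabled), handler finder, event callback.
    rn.Init("nats://127.0.0.1:4222", false, "node1", 0, stubFinder{}, func(ev event.IEvent) {})
    if err := rn.Start(); err != nil {
        log.Fatal(err)
    }
    select {} // keep the process alive; a real node ties this into its lifecycle
}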

rpc/rpctimer.go Normal file

@@ -0,0 +1,89 @@
package rpc
import (
"container/heap"
"time"
)
type CallTimer struct {
SeqId uint64
FireTime int64
}
type CallTimerHeap struct {
callTimer []CallTimer
mapSeqIndex map[uint64]int
}
func (h *CallTimerHeap) Init() {
h.mapSeqIndex = make(map[uint64]int, 4096)
h.callTimer = make([]CallTimer, 0, 4096)
}
func (h *CallTimerHeap) Len() int {
return len(h.callTimer)
}
func (h *CallTimerHeap) Less(i, j int) bool {
return h.callTimer[i].FireTime < h.callTimer[j].FireTime
}
func (h *CallTimerHeap) Swap(i, j int) {
h.callTimer[i], h.callTimer[j] = h.callTimer[j], h.callTimer[i]
h.mapSeqIndex[h.callTimer[i].SeqId] = i
h.mapSeqIndex[h.callTimer[j].SeqId] = j
}
func (h *CallTimerHeap) Push(t any) {
callTimer := t.(CallTimer)
h.mapSeqIndex[callTimer.SeqId] = len(h.callTimer)
h.callTimer = append(h.callTimer, callTimer)
}
func (h *CallTimerHeap) Pop() any {
l := len(h.callTimer)
seqId := h.callTimer[l-1].SeqId
h.callTimer = h.callTimer[:l-1]
delete(h.mapSeqIndex, seqId)
return seqId
}
func (h *CallTimerHeap) Cancel(seq uint64) bool {
index, ok := h.mapSeqIndex[seq]
if ok == false {
return false
}
heap.Remove(h, index)
return true
}
func (h *CallTimerHeap) AddTimer(seqId uint64,d time.Duration){
heap.Push(h, CallTimer{
SeqId: seqId,
FireTime: time.Now().Add(d).UnixNano(),
})
}
func (h *CallTimerHeap) PopTimeout() uint64 {
if h.Len() == 0 {
return 0
}
nextFireTime := h.callTimer[0].FireTime
if nextFireTime > time.Now().UnixNano() {
return 0
}
return heap.Pop(h).(uint64)
}
func (h *CallTimerHeap) PopFirst() uint64 {
if h.Len() == 0 {
return 0
}
return heap.Pop(h).(uint64)
}
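A self-contained sketch of how this timer heap is driven; the seq ids and durations are arbitrary, and the real rpc client polls PopTimeout on its own schedule.

package main

import (
    "fmt"
    "time"

    "github.com/duanhf2012/origin/v2/rpc" // assumed import path
)

func main() {
    var h rpc.CallTimerHeap
    h.Init()

    // Register two pending calls with different timeouts.
    h.AddTimer(1, 10*time.Millisecond)
    h.AddTimer(2, time.Hour)

    // Cancelling removes a call's timer before it fires.
    h.Cancel(2)

    time.Sleep(20 * time.Millisecond)

    // PopTimeout returns the seq id of an expired timer, or 0 when none is due.
    for seq := h.PopTimeout(); seq != 0; seq = h.PopTimeout() {
        fmt.Println("call timed out:", seq)
    }
}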


@@ -1,32 +1,48 @@
package rpc
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/network"
"math"
"net"
"reflect"
"strings"
"time"
"runtime"
)
const Default_ReadWriteDeadline = 15*time.Second
type RpcProcessorType uint8
const (
RpcProcessorJson RpcProcessorType = 0
RpcProcessorGoGoPB RpcProcessorType = 1
RpcProcessorPB RpcProcessorType = 1
)
//var processor IRpcProcessor = &JsonProcessor{}
var arrayProcessor = []IRpcProcessor{&JsonProcessor{}, &GoGoPBProcessor{}}
var arrayProcessor = []IRpcProcessor{&JsonProcessor{}, &PBProcessor{}}
var arrayProcessorLen uint8 = 2
var LittleEndian bool
type IServer interface {
Start() error
Stop()
selfNodeRpcHandlerGo(timeout time.Duration,processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call
myselfRpcHandlerGo(client *Client,handlerName string, serviceMethod string, args interface{},callBack reflect.Value, reply interface{}) error
selfNodeRpcHandlerAsyncGo(timeout time.Duration,client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value,cancelable bool) (CancelRpc,error)
}
type writeResponse func(processor IRpcProcessor,connTag string, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError)
type Server struct {
BaseServer
functions map[interface{}]interface{}
rpcHandleFinder RpcHandleFinder
rpcServer *network.TCPServer
listenAddr string
maxRpcParamLen uint32
}
type RpcAgent struct {
@@ -57,36 +73,47 @@ func GetProcessor(processorType uint8) IRpcProcessor {
return arrayProcessor[processorType]
}
func (server *Server) Init(rpcHandleFinder RpcHandleFinder) {
server.rpcHandleFinder = rpcHandleFinder
func (server *Server) Init(listenAddr string, maxRpcParamLen uint32,compressBytesLen int,rpcHandleFinder RpcHandleFinder) {
server.initBaseServer(compressBytesLen,rpcHandleFinder)
server.listenAddr = listenAddr
server.maxRpcParamLen = maxRpcParamLen
server.rpcServer = &network.TCPServer{}
}
func (server *Server) Start(listenAddr string, maxRpcParamLen uint32) {
splitAddr := strings.Split(listenAddr, ":")
func (server *Server) Start() error{
splitAddr := strings.Split(server.listenAddr, ":")
if len(splitAddr) != 2 {
log.SFatal("listen addr is error :", listenAddr)
return fmt.Errorf("listen addr is failed,listenAddr:%s", server.listenAddr)
}
server.rpcServer.Addr = ":" + splitAddr[1]
server.rpcServer.LenMsgLen = 4 //uint16
server.rpcServer.MinMsgLen = 2
if maxRpcParamLen > 0 {
server.rpcServer.MaxMsgLen = maxRpcParamLen
server.compressBytesLen = server.compressBytesLen
if server.maxRpcParamLen > 0 {
server.rpcServer.MaxMsgLen = server.maxRpcParamLen
} else {
server.rpcServer.MaxMsgLen = math.MaxUint32
}
server.rpcServer.MaxConnNum = 10000
server.rpcServer.MaxConnNum = 100000
server.rpcServer.PendingWriteNum = 2000000
server.rpcServer.NewAgent = server.NewAgent
server.rpcServer.LittleEndian = LittleEndian
server.rpcServer.Start()
server.rpcServer.WriteDeadline = Default_ReadWriteDeadline
server.rpcServer.ReadDeadline = Default_ReadWriteDeadline
server.rpcServer.LenMsgLen = DefaultRpcLenMsgLen
return server.rpcServer.Start()
}
func (server *Server) Stop(){
server.rpcServer.Close()
}
func (agent *RpcAgent) OnDestroy() {}
func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError) {
func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, connTag string, serviceMethod string, seq uint64, reply interface{}, rpcError RpcError) {
var mReply []byte
var errM error
@@ -103,104 +130,59 @@ func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, serviceMethod stri
defer processor.ReleaseRpcResponse(rpcResponse.RpcResponseData)
if errM != nil {
log.SError("service method ", serviceMethod, " Marshal error:", errM.Error())
log.Error("mashal RpcResponseData failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",errM))
return
}
errM = agent.conn.WriteMsg([]byte{uint8(processor.GetProcessorType())}, bytes)
var compressBuff[]byte
bCompress := uint8(0)
if agent.rpcServer.compressBytesLen >0 && len(bytes) >= agent.rpcServer.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
log.Error("CompressBlock failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",cErr))
return
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
errM = agent.conn.WriteMsg([]byte{uint8(processor.GetProcessorType())|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if errM != nil {
log.SError("Rpc ", serviceMethod, " return is error:", errM.Error())
log.Error("WriteMsg error,Rpc return is fail",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",errM))
}
}
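The first byte written above carries the processor type in the low bits and, when compression kicked in, a flag in bit 7. A tiny sketch of that encoding (the receiving side is assumed to split the byte the same way):

package main

import "fmt"

func main() {
    const processorTypePB = 1 // RpcProcessorPB as defined above
    const compressFlag = uint8(1) << 7

    // Sender: processor type in the low bits, compression flag in bit 7.
    header := uint8(processorTypePB) | compressFlag

    // Receiver: split the two parts again.
    compressed := header&compressFlag != 0
    processorType := header &^ compressFlag
    fmt.Println(processorType, compressed) // 1 true
}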
func (agent *RpcAgent) Run() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
for {
data, err := agent.conn.ReadMsg()
if err != nil {
log.SError("remoteAddress:", agent.conn.RemoteAddr().String(), ",read message: ", err.Error())
log.Error("read message is error",log.String("remoteAddress",agent.conn.RemoteAddr().String()),log.ErrorAttr("error",err))
//will close tcpconn
break
}
processor := GetProcessor(data[0])
if processor == nil {
agent.conn.ReleaseReadMsg(data)
log.SError("remote rpc ", agent.conn.RemoteAddr(), " cannot find processor:", data[0])
return
}
//parse the header
req := MakeRpcRequest(processor, 0, 0, "", false, nil)
err = processor.Unmarshal(data[1:], req.RpcRequestData)
agent.conn.ReleaseReadMsg(data)
defer agent.conn.ReleaseReadMsg(data)
err = agent.rpcServer.processRpcRequest( data,"",agent.WriteResponse)
if err != nil {
log.SError("rpc Unmarshal request is error:", err.Error())
if req.RpcRequestData.GetSeq() > 0 {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() == false {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
continue
} else {
//will close tcpconn
ReleaseRpcRequest(req)
break
}
}
//hand over to the application handler
serviceMethod := strings.Split(req.RpcRequestData.GetServiceMethod(), ".")
if len(serviceMethod) < 1 {
rpcError := RpcError("rpc request req.ServiceMethod is error")
if req.RpcRequestData.IsNoReply() == false {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
log.SError("rpc request req.ServiceMethod is error")
continue
}
rpcHandler := agent.rpcServer.rpcHandleFinder.FindRpcHandler(serviceMethod[0])
if rpcHandler == nil {
rpcError := RpcError(fmt.Sprintf("service method %s not config!", req.RpcRequestData.GetServiceMethod()))
if req.RpcRequestData.IsNoReply() == false {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
log.SError("service method ", req.RpcRequestData.GetServiceMethod(), " not config!")
ReleaseRpcRequest(req)
continue
}
if req.RpcRequestData.IsNoReply() == false {
req.requestHandle = func(Returns interface{}, Err RpcError) {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), Returns, Err)
ReleaseRpcRequest(req)
}
}
req.inParam, err = rpcHandler.UnmarshalInParam(req.rpcProcessor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetRpcMethodId(), req.RpcRequestData.GetInParam())
if err != nil {
rErr := "Call Rpc " + req.RpcRequestData.GetServiceMethod() + " Param error " + err.Error()
if req.requestHandle != nil {
req.requestHandle(nil, RpcError(rErr))
} else {
ReleaseRpcRequest(req)
}
log.SError(rErr)
continue
}
err = rpcHandler.PushRpcRequest(req)
if err != nil {
rpcError := RpcError(err.Error())
if req.RpcRequestData.IsNoReply() {
agent.WriteResponse(processor, req.RpcRequestData.GetServiceMethod(), req.RpcRequestData.GetSeq(), nil, rpcError)
}
ReleaseRpcRequest(req)
log.Error("processRpcRequest is error",log.String("remoteAddress",agent.conn.RemoteAddr().String()),log.ErrorAttr("error",err))
//will close tcpconn
break
}
}
}
@@ -233,140 +215,3 @@ func (server *Server) NewAgent(c *network.TCPConn) network.Agent {
return agent
}
func (server *Server) myselfRpcHandlerGo(handlerName string, serviceMethod string, args interface{}, reply interface{}) error {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.SError(err.Error())
return err
}
return rpcHandler.CallMethod(serviceMethod, args, reply)
}
func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
pCall := MakeCall()
pCall.Seq = client.generateSeq()
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
pCall.Seq = 0
pCall.Err = errors.New("service method " + serviceMethod + " not config!")
log.SError(pCall.Err.Error())
pCall.done <- pCall
return pCall
}
if processor == nil {
_, processor = GetProcessorType(args)
}
req := MakeRpcRequest(processor, 0, rpcMethodId, serviceMethod, noReply, nil)
req.inParam = args
req.localReply = reply
if rawArgs != nil {
var err error
req.inParam, err = rpcHandler.UnmarshalInParam(processor, serviceMethod, rpcMethodId, rawArgs)
if err != nil {
ReleaseRpcRequest(req)
pCall.Err = err
pCall.done <- pCall
return pCall
}
}
if noReply == false {
client.AddPending(pCall)
req.requestHandle = func(Returns interface{}, Err RpcError) {
if reply != nil && Returns != reply && Returns != nil {
byteReturns, err := req.rpcProcessor.Marshal(Returns)
if err != nil {
log.SError("returns data cannot be marshal ", pCall.Seq)
ReleaseRpcRequest(req)
}
err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
log.SError("returns data cannot be Unmarshal ", pCall.Seq)
ReleaseRpcRequest(req)
}
}
v := client.RemovePending(pCall.Seq)
if v == nil {
log.SError("rpcClient cannot find seq ", pCall.Seq, " in pending")
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
pCall.Err = nil
} else {
pCall.Err = Err
}
pCall.done <- pCall
ReleaseRpcRequest(req)
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
pCall.Err = err
pCall.done <- pCall
}
return pCall
}
func (server *Server) selfNodeRpcHandlerAsyncGo(client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value) error {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.SError(err.Error())
return err
}
_, processor := GetProcessorType(args)
req := MakeRpcRequest(processor, 0, 0, serviceMethod, noReply, nil)
req.inParam = args
req.localReply = reply
if noReply == false {
callSeq := client.generateSeq()
pCall := MakeCall()
pCall.Seq = callSeq
pCall.rpcHandler = callerRpcHandler
pCall.callback = &callback
pCall.Reply = reply
client.AddPending(pCall)
req.requestHandle = func(Returns interface{}, Err RpcError) {
v := client.RemovePending(callSeq)
if v == nil {
log.SError("rpcClient cannot find seq ", pCall.Seq, " in pending")
//ReleaseCall(pCall)
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
pCall.Err = nil
} else {
pCall.Err = Err
}
if Returns != nil {
pCall.Reply = Returns
}
pCall.rpcHandler.PushRpcResponse(pCall)
ReleaseRpcRequest(req)
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
return err
}
return nil
}


@@ -2,31 +2,35 @@ package service
import (
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/log"
rpcHandle "github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/util/timer"
"reflect"
"sync/atomic"
"time"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
rpcHandle "github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/timer"
"github.com/duanhf2012/origin/v2/concurrent"
)
const InitModuleId = 1e9
type IModule interface {
concurrent.IConcurrent
SetModuleId(moduleId uint32) bool
GetModuleId() uint32
AddModule(module IModule) (uint32,error)
AddModule(module IModule) (uint32, error)
GetModule(moduleId uint32) IModule
GetAncestor()IModule
GetAncestor() IModule
ReleaseModule(moduleId uint32)
NewModuleId() uint32
GetParent()IModule
GetParent() IModule
OnInit() error
OnRelease()
getBaseModule() IModule
GetService() IService
GetModuleName() string
GetEventProcessor()event.IEventProcessor
GetEventProcessor() event.IEventProcessor
NotifyEvent(ev event.IEvent)
}
@@ -38,25 +42,26 @@ type IModuleTimer interface {
type Module struct {
rpcHandle.IRpcHandler
moduleId uint32 //module ID
moduleName string //module name
parent IModule //parent module
self IModule //self
child map[uint32]IModule //child modules
mapActiveTimer map[timer.ITimer]struct{}
moduleId uint32 //module ID
moduleName string //module name
parent IModule //parent module
self IModule //self
child map[uint32]IModule //child modules
mapActiveTimer map[timer.ITimer]struct{}
mapActiveIdTimer map[uint64]timer.ITimer
dispatcher *timer.Dispatcher //timer
dispatcher *timer.Dispatcher //timer
//root node
ancestor IModule //the ancestor (root) module
seedModuleId uint32 //seed for module IDs
descendants map[uint32]IModule //all descendants of the ancestor
ancestor IModule //the ancestor (root) module
seedModuleId uint32 //seed for module IDs
descendants map[uint32]IModule //all descendants of the ancestor
//event pipeline
eventHandler event.IEventHandler
concurrent.IConcurrent
}
func (m *Module) SetModuleId(moduleId uint32) bool{
func (m *Module) SetModuleId(moduleId uint32) bool {
if m.moduleId > 0 {
return false
}
@@ -65,35 +70,35 @@ func (m *Module) SetModuleId(moduleId uint32) bool{
return true
}
func (m *Module) GetModuleId() uint32{
func (m *Module) GetModuleId() uint32 {
return m.moduleId
}
func (m *Module) GetModuleName() string{
func (m *Module) GetModuleName() string {
return m.moduleName
}
func (m *Module) OnInit() error{
return nil
func (m *Module) OnInit() error {
return nil
}
func (m *Module) AddModule(module IModule) (uint32,error){
func (m *Module) AddModule(module IModule) (uint32, error) {
//a module without an event processor is not allowed to add other modules
if m.GetEventProcessor() == nil {
return 0,fmt.Errorf("module %+v Event Processor is nil", m.self)
return 0, fmt.Errorf("module %+v Event Processor is nil", m.self)
}
pAddModule := module.getBaseModule().(*Module)
if pAddModule.GetModuleId()==0 {
if pAddModule.GetModuleId() == 0 {
pAddModule.moduleId = m.NewModuleId()
}
if m.child == nil {
m.child = map[uint32]IModule{}
}
_,ok := m.child[module.GetModuleId()]
_, ok := m.child[module.GetModuleId()]
if ok == true {
return 0,fmt.Errorf("exists module id %d",module.GetModuleId())
return 0, fmt.Errorf("exists module id %d", module.GetModuleId())
}
pAddModule.IRpcHandler = m.IRpcHandler
pAddModule.self = module
@@ -103,19 +108,20 @@ func (m *Module) AddModule(module IModule) (uint32,error){
pAddModule.moduleName = reflect.Indirect(reflect.ValueOf(module)).Type().Name()
pAddModule.eventHandler = event.NewEventHandler()
pAddModule.eventHandler.Init(m.eventHandler.GetEventProcessor())
pAddModule.IConcurrent = m.IConcurrent
err := module.OnInit()
if err != nil {
return 0,err
return 0, err
}
m.child[module.GetModuleId()] = module
m.ancestor.getBaseModule().(*Module).descendants[module.GetModuleId()] = module
log.SDebug("Add module ",module.GetModuleName()," completed")
return module.GetModuleId(),nil
log.Debug("Add module "+module.GetModuleName()+ " completed")
return module.GetModuleId(), nil
}
func (m *Module) ReleaseModule(moduleId uint32){
func (m *Module) ReleaseModule(moduleId uint32) {
pModule := m.GetModule(moduleId).getBaseModule().(*Module)
//release all descendant modules
@@ -123,19 +129,19 @@ func (m *Module) ReleaseModule(moduleId uint32){
m.ReleaseModule(id)
}
pModule.GetEventHandler().Destroy()
pModule.self.OnRelease()
log.SDebug("Release module ", pModule.GetModuleName())
pModule.GetEventHandler().Destroy()
log.Debug("Release module "+ pModule.GetModuleName())
for pTimer := range pModule.mapActiveTimer {
pTimer.Cancel()
}
for _,t := range pModule.mapActiveIdTimer {
for _, t := range pModule.mapActiveIdTimer {
t.Cancel()
}
delete(m.child,moduleId)
delete (m.ancestor.getBaseModule().(*Module).descendants,moduleId)
delete(m.child, moduleId)
delete(m.ancestor.getBaseModule().(*Module).descendants, moduleId)
//clean up the removed Module
pModule.self = nil
@@ -149,16 +155,17 @@ func (m *Module) ReleaseModule(moduleId uint32){
pModule.mapActiveIdTimer = nil
}
func (m *Module) NewModuleId() uint32{
m.ancestor.getBaseModule().(*Module).seedModuleId+=1
func (m *Module) NewModuleId() uint32 {
m.ancestor.getBaseModule().(*Module).seedModuleId += 1
return m.ancestor.getBaseModule().(*Module).seedModuleId
}
var timerSeedId uint32
func (m *Module) GenTimerId() uint64{
for{
newTimerId := (uint64(m.GetModuleId())<<32)|uint64(atomic.AddUint32(&timerSeedId,1))
if _,ok := m.mapActiveIdTimer[newTimerId];ok == true {
func (m *Module) GenTimerId() uint64 {
for {
newTimerId := (uint64(m.GetModuleId()) << 32) | uint64(atomic.AddUint32(&timerSeedId, 1))
if _, ok := m.mapActiveIdTimer[newTimerId]; ok == true {
continue
}
@@ -166,33 +173,32 @@ func (m *Module) GenTimerId() uint64{
}
}
func (m *Module) GetAncestor()IModule{
func (m *Module) GetAncestor() IModule {
return m.ancestor
}
func (m *Module) GetModule(moduleId uint32) IModule{
iModule,ok := m.GetAncestor().getBaseModule().(*Module).descendants[moduleId]
func (m *Module) GetModule(moduleId uint32) IModule {
iModule, ok := m.GetAncestor().getBaseModule().(*Module).descendants[moduleId]
if ok == false {
return nil
}
return iModule
}
func (m *Module) getBaseModule() IModule{
func (m *Module) getBaseModule() IModule {
return m
}
func (m *Module) GetParent()IModule{
func (m *Module) GetParent() IModule {
return m.parent
}
func (m *Module) OnCloseTimer(t timer.ITimer){
delete(m.mapActiveIdTimer,t.GetId())
delete(m.mapActiveTimer,t)
func (m *Module) OnCloseTimer(t timer.ITimer) {
delete(m.mapActiveIdTimer, t.GetId())
delete(m.mapActiveTimer, t)
}
func (m *Module) OnAddTimer(t timer.ITimer){
func (m *Module) OnAddTimer(t timer.ITimer) {
if t != nil {
if m.mapActiveTimer == nil {
m.mapActiveTimer = map[timer.ITimer]struct{}{}
@@ -204,33 +210,33 @@ func (m *Module) OnAddTimer(t timer.ITimer){
func (m *Module) AfterFunc(d time.Duration, cb func(*timer.Timer)) *timer.Timer {
if m.mapActiveTimer == nil {
m.mapActiveTimer =map[timer.ITimer]struct{}{}
m.mapActiveTimer = map[timer.ITimer]struct{}{}
}
return m.dispatcher.AfterFunc(d,nil,cb,m.OnCloseTimer,m.OnAddTimer)
return m.dispatcher.AfterFunc(d, nil, cb, m.OnCloseTimer, m.OnAddTimer)
}
func (m *Module) CronFunc(cronExpr *timer.CronExpr, cb func(*timer.Cron)) *timer.Cron {
if m.mapActiveTimer == nil {
m.mapActiveTimer =map[timer.ITimer]struct{}{}
m.mapActiveTimer = map[timer.ITimer]struct{}{}
}
return m.dispatcher.CronFunc(cronExpr,nil,cb,m.OnCloseTimer,m.OnAddTimer)
return m.dispatcher.CronFunc(cronExpr, nil, cb, m.OnCloseTimer, m.OnAddTimer)
}
func (m *Module) NewTicker(d time.Duration, cb func(*timer.Ticker)) *timer.Ticker {
if m.mapActiveTimer == nil {
m.mapActiveTimer =map[timer.ITimer]struct{}{}
m.mapActiveTimer = map[timer.ITimer]struct{}{}
}
return m.dispatcher.TickerFunc(d,nil,cb,m.OnCloseTimer,m.OnAddTimer)
return m.dispatcher.TickerFunc(d, nil, cb, m.OnCloseTimer, m.OnAddTimer)
}
func (m *Module) cb(*timer.Timer){
func (m *Module) cb(*timer.Timer) {
}
func (m *Module) SafeAfterFunc(timerId *uint64,d time.Duration, AdditionData interface{},cb func(uint64,interface{})) {
func (m *Module) SafeAfterFunc(timerId *uint64, d time.Duration, AdditionData interface{}, cb func(uint64, interface{})) {
if m.mapActiveIdTimer == nil {
m.mapActiveIdTimer = map[uint64]timer.ITimer{}
}
@@ -240,45 +246,50 @@ func (m *Module) SafeAfterFunc(timerId *uint64,d time.Duration, AdditionData int
}
*timerId = m.GenTimerId()
t := m.dispatcher.AfterFunc(d,cb,nil,m.OnCloseTimer,m.OnAddTimer)
t := m.dispatcher.AfterFunc(d, cb, nil, m.OnCloseTimer, m.OnAddTimer)
t.AdditionData = AdditionData
t.Id = *timerId
m.mapActiveIdTimer[*timerId] = t
}
func (m *Module) SafeCronFunc(cronId *uint64,cronExpr *timer.CronExpr, AdditionData interface{}, cb func(uint64,interface{})) {
func (m *Module) SafeCronFunc(cronId *uint64, cronExpr *timer.CronExpr, AdditionData interface{}, cb func(uint64, interface{})) {
if m.mapActiveIdTimer == nil {
m.mapActiveIdTimer = map[uint64]timer.ITimer{}
}
*cronId = m.GenTimerId()
c := m.dispatcher.CronFunc(cronExpr,cb,nil,m.OnCloseTimer,m.OnAddTimer)
c := m.dispatcher.CronFunc(cronExpr, cb, nil, m.OnCloseTimer, m.OnAddTimer)
c.AdditionData = AdditionData
c.Id = *cronId
m.mapActiveIdTimer[*cronId] = c
}
func (m *Module) SafeNewTicker(tickerId *uint64,d time.Duration, AdditionData interface{}, cb func(uint64,interface{})) {
func (m *Module) SafeNewTicker(tickerId *uint64, d time.Duration, AdditionData interface{}, cb func(uint64, interface{})) {
if m.mapActiveIdTimer == nil {
m.mapActiveIdTimer = map[uint64]timer.ITimer{}
}
*tickerId = m.GenTimerId()
t := m.dispatcher.TickerFunc(d,cb,nil,m.OnCloseTimer,m.OnAddTimer)
t := m.dispatcher.TickerFunc(d, cb, nil, m.OnCloseTimer, m.OnAddTimer)
t.AdditionData = AdditionData
t.Id = *tickerId
m.mapActiveIdTimer[*tickerId] = t
}
func (m *Module) CancelTimerId(timerId *uint64) bool{
if m.mapActiveIdTimer == nil {
log.SError("mapActiveIdTimer is nil")
func (m *Module) CancelTimerId(timerId *uint64) bool {
if timerId==nil || *timerId == 0 {
log.Warning("timerId is invalid")
return false
}
t,ok := m.mapActiveIdTimer[*timerId]
if m.mapActiveIdTimer == nil {
log.Error("mapActiveIdTimer is nil")
return false
}
t, ok := m.mapActiveIdTimer[*timerId]
if ok == false {
log.SError("cannot find timer id ",timerId)
log.Stack("cannot find timer id ", log.Uint64("timerId",*timerId))
return false
}
@@ -287,23 +298,21 @@ func (m *Module) CancelTimerId(timerId *uint64) bool{
return true
}
func (m *Module) OnRelease(){
func (m *Module) OnRelease() {
}
func (m *Module) GetService() IService {
return m.GetAncestor().(IService)
}
func (m *Module) GetEventProcessor() event.IEventProcessor{
func (m *Module) GetEventProcessor() event.IEventProcessor {
return m.eventHandler.GetEventProcessor()
}
func (m *Module) NotifyEvent(ev event.IEvent){
func (m *Module) NotifyEvent(ev event.IEvent) {
m.eventHandler.NotifyEvent(ev)
}
func (m *Module) GetEventHandler() event.IEventHandler{
func (m *Module) GetEventHandler() event.IEventHandler {
return m.eventHandler
}
}
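A minimal usage sketch (not part of the diff) of the module API above, assuming a hypothetical CacheModule/MyService pair: AddModule assigns a module id and calls the child's OnInit, and the id-based timer helpers are cancelled in OnRelease.

package myservice

import (
	"time"

	"github.com/duanhf2012/origin/v2/service"
)

// CacheModule is a hypothetical child module.
type CacheModule struct {
	service.Module
	cleanTimerId uint64
}

func (c *CacheModule) OnInit() error {
	// schedule a delayed cleanup through the id-based timer API
	c.SafeAfterFunc(&c.cleanTimerId, 30*time.Second, nil, func(id uint64, data interface{}) {
		// ... cleanup work ...
	})
	return nil
}

func (c *CacheModule) OnRelease() {
	c.CancelTimerId(&c.cleanTimerId)
}

// MyService is a hypothetical service that owns the module.
type MyService struct {
	service.Service
	cache CacheModule
}

func (s *MyService) OnInit() error {
	// AddModule assigns a module id and calls CacheModule.OnInit;
	// it requires the service's event processor to be set up already
	_, err := s.AddModule(&s.cache)
	return err
}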

View File

@@ -3,76 +3,100 @@ package service
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/profiler"
"github.com/duanhf2012/origin/rpc"
originSync "github.com/duanhf2012/origin/util/sync"
"github.com/duanhf2012/origin/util/timer"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/profiler"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/timer"
"reflect"
"runtime"
"strconv"
"sync"
"sync/atomic"
"github.com/duanhf2012/origin/v2/concurrent"
"encoding/json"
)
var closeSig chan bool
var timerDispatcherLen = 100000
var maxServiceEventChannelNum = 2000000
type IService interface {
concurrent.IConcurrent
Init(iService IService,getClientFun rpc.FuncRpcClient,getServerFun rpc.FuncRpcServer,serviceCfg interface{})
SetName(serviceName string)
GetName() string
Stop()
Start()
OnSetup(iService IService)
OnInit() error
OnStart()
OnRetire()
OnRelease()
Wait()
Start()
SetName(serviceName string)
GetName() string
GetRpcHandler() rpc.IRpcHandler
GetServiceCfg()interface{}
OpenProfiler()
GetProfiler() *profiler.Profiler
}
GetServiceEventChannelNum() int
GetServiceTimerChannelNum() int
// eventPool: memory pool that caches Event objects
var maxServiceEventChannel = 2000000
var eventPool = originSync.NewPoolEx(make(chan originSync.IPoolData, maxServiceEventChannel), func() originSync.IPoolData {
return &event.Event{}
})
SetEventChannelNum(num int)
OpenProfiler()
SetRetire() //put the service into the retired state
IsRetire() bool //whether the service has retired
}
type Service struct {
Module
rpcHandler rpc.RpcHandler //rpc
name string //service name
wg sync.WaitGroup
serviceCfg interface{}
goroutineNum int32
startStatus bool
retire int32
eventProcessor event.IEventProcessor
profiler *profiler.Profiler //performance profiler
rpcEventLister rpc.IRpcListener
nodeConnLister rpc.INodeConnListener
natsConnListener rpc.INatsConnListener
discoveryServiceLister rpc.IDiscoveryServiceListener
chanEvent chan event.IEvent
closeSig chan struct{}
}
// RpcConnEvent node connection event
type RpcConnEvent struct{
IsConnect bool
NodeId int
// DiscoveryServiceEvent service discovery event for a node
type DiscoveryServiceEvent struct{
IsDiscovery bool
ServiceName []string
NodeId string
}
type EtcdServiceRecordEvent struct {
NetworkName string
TTLSecond int64
RecordKey string
RecordInfo string
}
type Empty struct {
}
func SetMaxServiceChannel(maxEventChannel int){
maxServiceEventChannel = maxEventChannel
eventPool = originSync.NewPoolEx(make(chan originSync.IPoolData, maxServiceEventChannel), func() originSync.IPoolData {
return &event.Event{}
})
maxServiceEventChannelNum = maxEventChannel
}
func (rpcEventData *RpcConnEvent) GetEventType() event.EventType{
return event.Sys_Event_Rpc_Event
func (rpcEventData *DiscoveryServiceEvent) GetEventType() event.EventType{
return event.Sys_Event_DiscoverService
}
func (s *Service) OnSetup(iService IService){
if iService.GetName() == "" {
s.name = reflect.Indirect(reflect.ValueOf(iService)).Type().Name()
@@ -82,13 +106,30 @@ func (s *Service) OnSetup(iService IService){
func (s *Service) OpenProfiler() {
s.profiler = profiler.RegProfiler(s.GetName())
if s.profiler==nil {
log.SFatal("rofiler.RegProfiler ",s.GetName()," fail.")
log.Fatal("rofiler.RegProfiler "+s.GetName()+" fail.")
}
}
func (s *Service) IsRetire() bool{
return atomic.LoadInt32(&s.retire) != 0
}
func (s *Service) SetRetire(){
atomic.StoreInt32(&s.retire,1)
ev := event.NewEvent()
ev.Type = event.Sys_Event_Retire
s.pushEvent(ev)
}
func (s *Service) Init(iService IService,getClientFun rpc.FuncRpcClient,getServerFun rpc.FuncRpcServer,serviceCfg interface{}) {
s.closeSig = make(chan struct{})
s.dispatcher =timer.NewDispatcher(timerDispatcherLen)
s.chanEvent = make(chan event.IEvent,maxServiceEventChannel)
if s.chanEvent == nil {
s.chanEvent = make(chan event.IEvent,maxServiceEventChannelNum)
}
s.rpcHandler.InitRpcHandler(iService.(rpc.IRpcHandler),getClientFun,getServerFun,iService.(rpc.IRpcHandlerChannel))
s.IRpcHandler = &s.rpcHandler
s.self = iService.(IModule)
@@ -102,40 +143,56 @@ func (s *Service) Init(iService IService,getClientFun rpc.FuncRpcClient,getServe
s.eventProcessor.Init(s)
s.eventHandler = event.NewEventHandler()
s.eventHandler.Init(s.eventProcessor)
s.Module.IConcurrent = &concurrent.Concurrent{}
}
func (s *Service) Start() {
s.startStatus = true
var waitRun sync.WaitGroup
for i:=int32(0);i< s.goroutineNum;i++{
s.wg.Add(1)
waitRun.Add(1)
go func(){
log.Info(s.GetName()+" service is running",)
waitRun.Done()
s.Run()
}()
}
waitRun.Wait()
}
func (s *Service) Run() {
log.SDebug("Start running Service ", s.GetName())
defer s.wg.Done()
var bStop = false
concurrent := s.IConcurrent.(*concurrent.Concurrent)
concurrentCBChannel := concurrent.GetCallBackChannel()
s.self.(IService).OnStart()
for{
var analyzer *profiler.Analyzer
select {
case <- closeSig:
case <- s.closeSig:
bStop = true
concurrent.Close()
case cb:=<-concurrentCBChannel:
concurrent.DoCallback(cb)
case ev := <- s.chanEvent:
switch ev.GetEventType() {
case event.Sys_Event_Retire:
log.Info("service OnRetire",log.String("servceName",s.GetName()))
s.self.(IService).OnRetire()
case event.ServiceRpcRequestEvent:
cEvent,ok := ev.(*event.Event)
if ok == false {
log.SError("Type event conversion error")
log.Error("Type event conversion error")
break
}
rpcRequest,ok := cEvent.Data.(*rpc.RpcRequest)
if ok == false {
log.SError("Type *rpc.RpcRequest conversion error")
log.Error("Type *rpc.RpcRequest conversion error")
break
}
if s.profiler!=nil {
@@ -147,16 +204,16 @@ func (s *Service) Run() {
analyzer.Pop()
analyzer = nil
}
eventPool.Put(cEvent)
event.DeleteEvent(cEvent)
case event.ServiceRpcResponseEvent:
cEvent,ok := ev.(*event.Event)
if ok == false {
log.SError("Type event conversion error")
log.Error("Type event conversion error")
break
}
rpcResponseCB,ok := cEvent.Data.(*rpc.Call)
if ok == false {
log.SError("Type *rpc.Call conversion error")
log.Error("Type *rpc.Call conversion error")
break
}
if s.profiler!=nil {
@@ -167,7 +224,7 @@ func (s *Service) Run() {
analyzer.Pop()
analyzer = nil
}
eventPool.Put(cEvent)
event.DeleteEvent(cEvent)
default:
if s.profiler!=nil {
analyzer = s.profiler.Push("[SEvent]"+strconv.Itoa(int(ev.GetEventType())))
@@ -214,11 +271,11 @@ func (s *Service) Release(){
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.SError("core dump info[",errString,"]\n",string(buf[:l]))
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
s.self.OnRelease()
log.SDebug("Release Service ", s.GetName())
}
func (s *Service) OnRelease(){
@@ -228,14 +285,35 @@ func (s *Service) OnInit() error {
return nil
}
func (s *Service) Wait(){
func (s *Service) Stop(){
log.Info("stop "+s.GetName()+" service ")
close(s.closeSig)
s.wg.Wait()
log.Info(s.GetName()+" service has been stopped")
}
func (s *Service) GetServiceCfg()interface{}{
return s.serviceCfg
}
func (s *Service) ParseServiceCfg(cfg interface{}) error{
if s.serviceCfg == nil {
return errors.New("no service configuration found")
}
rv := reflect.ValueOf(s.serviceCfg)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
return errors.New("no service configuration found")
}
bytes,err := json.Marshal(s.serviceCfg)
if err != nil {
return err
}
return json.Unmarshal(bytes,cfg)
}
func (s *Service) GetProfiler() *profiler.Profiler{
return s.profiler
}
@@ -259,29 +337,68 @@ func (s *Service) RegRawRpc(rpcMethodId uint32,rawRpcCB rpc.RawRpcCallBack){
func (s *Service) OnStart(){
}
func (s *Service) OnRpcEvent(ev event.IEvent){
event := ev.(*RpcConnEvent)
func (s *Service) OnNodeConnEvent(ev event.IEvent){
event := ev.(*rpc.RpcConnEvent)
if event.IsConnect {
s.rpcEventLister.OnNodeConnected(event.NodeId)
s.nodeConnLister.OnNodeConnected(event.NodeId)
}else{
s.rpcEventLister.OnNodeDisconnect(event.NodeId)
s.nodeConnLister.OnNodeDisconnect(event.NodeId)
}
}
func (s *Service) RegRpcListener(rpcEventLister rpc.IRpcListener) {
s.rpcEventLister = rpcEventLister
s.RegEventReceiverFunc(event.Sys_Event_Rpc_Event,s.GetEventHandler(),s.OnRpcEvent)
func (s *Service) OnNatsConnEvent(ev event.IEvent){
event := ev.(*rpc.NatsConnEvent)
if event.IsConnect {
s.natsConnListener.OnNatsConnected()
}else{
s.natsConnListener.OnNatsDisconnect()
}
}
func (s *Service) OnDiscoverServiceEvent(ev event.IEvent){
event := ev.(*DiscoveryServiceEvent)
if event.IsDiscovery {
s.discoveryServiceLister.OnDiscoveryService(event.NodeId,event.ServiceName)
}else{
s.discoveryServiceLister.OnUnDiscoveryService(event.NodeId,event.ServiceName)
}
}
func (s *Service) RegNodeConnListener(nodeConnListener rpc.INodeConnListener) {
s.nodeConnLister = nodeConnListener
s.RegEventReceiverFunc(event.Sys_Event_Node_Conn_Event,s.GetEventHandler(),s.OnNodeConnEvent)
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegRpcListener(rpcLister rpc.IRpcListener) {
s.UnRegEventReceiverFunc(event.Sys_Event_Rpc_Event,s.GetEventHandler())
func (s *Service) UnRegNodeConnListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_Node_Conn_Event,s.GetEventHandler())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) RegNatsConnListener(natsConnListener rpc.INatsConnListener) {
s.natsConnListener = natsConnListener
s.RegEventReceiverFunc(event.Sys_Event_Nats_Conn_Event,s.GetEventHandler(),s.OnNatsConnEvent)
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegNatsConnListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_Nats_Conn_Event,s.GetEventHandler())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) RegDiscoverListener(discoveryServiceListener rpc.IDiscoveryServiceListener) {
s.discoveryServiceLister = discoveryServiceListener
s.RegEventReceiverFunc(event.Sys_Event_DiscoverService,s.GetEventHandler(),s.OnDiscoverServiceEvent)
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegDiscoverListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_DiscoverService,s.GetEventHandler())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) PushRpcRequest(rpcRequest *rpc.RpcRequest) error{
ev := eventPool.Get().(*event.Event)
ev := event.NewEvent()
ev.Type = event.ServiceRpcRequestEvent
ev.Data = rpcRequest
@@ -289,7 +406,7 @@ func (s *Service) PushRpcRequest(rpcRequest *rpc.RpcRequest) error{
}
func (s *Service) PushRpcResponse(call *rpc.Call) error{
ev := eventPool.Get().(*event.Event)
ev := event.NewEvent()
ev.Type = event.ServiceRpcResponseEvent
ev.Data = call
@@ -301,9 +418,9 @@ func (s *Service) PushEvent(ev event.IEvent) error{
}
func (s *Service) pushEvent(ev event.IEvent) error{
if len(s.chanEvent) >= maxServiceEventChannel {
if len(s.chanEvent) >= maxServiceEventChannelNum {
err := errors.New("The event channel in the service is full")
log.SError(err.Error())
log.Error(err.Error())
return err
}
@@ -311,14 +428,32 @@ func (s *Service) pushEvent(ev event.IEvent) error{
return nil
}
func (s *Service) GetServiceEventChannelNum() int{
return len(s.chanEvent)
}
func (s *Service) GetServiceTimerChannelNum() int{
return len(s.dispatcher.ChanTimer)
}
func (s *Service) SetEventChannelNum(num int){
if s.chanEvent == nil {
s.chanEvent = make(chan event.IEvent,num)
}else {
panic("this stage cannot be set")
}
}
func (s *Service) SetGoRoutineNum(goroutineNum int32) bool {
//the goroutine count cannot be changed after the service has started, and multi-goroutine mode is not allowed while the profiler is open
if s.startStatus == true || s.profiler!=nil {
log.SError("cannot set multiple goroutines after the service has started or while the profiler is open.")
log.Error("cannot set multiple goroutines after the service has started or while the profiler is open.")
return false
}
s.goroutineNum = goroutineNum
return true
}
func (s *Service) OnRetire(){
}
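A minimal sketch (not part of the diff) of the new retire and config-parsing hooks; GateService, GateCfg and its fields are assumptions for illustration only.

package gate

import "github.com/duanhf2012/origin/v2/service"

// GateCfg mirrors this service's entry in the node config (field names assumed).
type GateCfg struct {
	ListenAddr string `json:"ListenAddr"`
	MaxConn    int    `json:"MaxConn"`
}

type GateService struct {
	service.Service
	cfg GateCfg
}

func (gate *GateService) OnInit() error {
	// ParseServiceCfg re-marshals the raw service config and unmarshals it into a typed struct
	return gate.ParseServiceCfg(&gate.cfg)
}

func (gate *GateService) OnRetire() {
	// invoked in the service goroutine after SetRetire pushes Sys_Event_Retire:
	// stop accepting new connections, keep serving existing ones until shutdown
}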

View File

@@ -1,24 +1,30 @@
package service
import (
"github.com/duanhf2012/origin/v2/log"
"os"
)
//all local services
var mapServiceName map[string]IService
var setupServiceList []IService
type RegRpcEventFunType func(serviceName string)
type RegDiscoveryServiceEventFunType func(serviceName string)
var RegRpcEventFun RegRpcEventFunType
var UnRegRpcEventFun RegRpcEventFunType
func init(){
mapServiceName = map[string]IService{}
setupServiceList = []IService{}
}
func Init(chanCloseSig chan bool) {
closeSig=chanCloseSig
func Init() {
for _,s := range setupServiceList {
err := s.OnInit()
if err != nil {
panic(err)
log.Error("Failed to initialize "+s.GetName()+" service",log.ErrorAttr("err",err))
os.Exit(1)
}
}
}
@@ -49,8 +55,14 @@ func Start(){
}
}
func WaitStop(){
func StopAllService(){
for i := len(setupServiceList) - 1; i >= 0; i-- {
setupServiceList[i].Wait()
setupServiceList[i].Stop()
}
}
func NotifyAllServiceRetire(){
for i := len(setupServiceList) - 1; i >= 0; i-- {
setupServiceList[i].SetRetire()
}
}

View File

@@ -0,0 +1,314 @@
package ginmodule
import (
"context"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"github.com/gin-gonic/gin"
"log/slog"
"net/http"
"strings"
"time"
"io"
)
type IGinProcessor interface {
Process(data *gin.Context) (*gin.Context, error)
}
type GinModule struct {
service.Module
*gin.Engine
srv *http.Server
listenAddr string
handleTimeout time.Duration
processor []IGinProcessor
}
func (gm *GinModule) Init(addr string, handleTimeout time.Duration,engine *gin.Engine) {
gm.listenAddr = addr
gm.handleTimeout = handleTimeout
gm.Engine = engine
}
func (gm *GinModule) SetupDataProcessor(processor ...IGinProcessor) {
gm.processor = processor
}
func (gm *GinModule) AppendDataProcessor(processor ...IGinProcessor) {
gm.processor = append(gm.processor, processor...)
}
func (gm *GinModule) OnInit() error {
if gm.Engine == nil {
gm.Engine = gin.Default()
}
gm.srv = &http.Server{
Addr: gm.listenAddr,
Handler: gm.Engine,
}
gm.Engine.Use(Logger())
gm.Engine.Use(gin.Recovery())
gm.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_Gin_Event, gm.GetEventHandler(), gm.eventHandler)
return nil
}
func (gm *GinModule) eventHandler(ev event.IEvent) {
ginEvent := ev.(*GinEvent)
for _, handler := range ginEvent.handlersChain {
handler(&ginEvent.c)
}
//ginEvent.chanWait <- struct{}{}
}
func (gm *GinModule) Start() {
gm.srv.Addr = gm.listenAddr
log.Info("http start listen", slog.Any("addr", gm.listenAddr))
go func() {
err := gm.srv.ListenAndServe()
if err != nil {
log.Error("ListenAndServe error", slog.Any("error", err.Error()))
}
}()
}
func (gm *GinModule) StartTLS(certFile, keyFile string) {
log.Info("http start listen", slog.Any("addr", gm.listenAddr))
go func() {
err := gm.srv.ListenAndServeTLS(certFile, keyFile)
if err != nil {
log.Fatal("ListenAndServeTLS error", slog.Any("error", err.Error()))
}
}()
}
func (gm *GinModule) Stop(ctx context.Context) {
if err := gm.srv.Shutdown(ctx); err != nil {
log.SError("Server Shutdown", slog.Any("error", err))
}
}
type SafeContext struct {
*gin.Context
chanWait chan struct{}
}
func (c *SafeContext) JSONAndDone(code int, obj any) {
c.Context.JSON(code,obj)
c.Done()
}
func (c *SafeContext) AsciiJSONAndDone(code int, obj any){
c.Context.AsciiJSON(code,obj)
c.Done()
}
func (c *SafeContext) PureJSONAndDone(code int, obj any){
c.Context.PureJSON(code,obj)
c.Done()
}
func (c *SafeContext) XMLAndDone(code int, obj any){
c.Context.XML(code,obj)
c.Done()
}
func (c *SafeContext) YAMLAndDone(code int, obj any){
c.Context.YAML(code,obj)
c.Done()
}
func (c *SafeContext) TOMLAndDone(code int, obj any){
c.Context.TOML(code,obj)
c.Done()
}
func (c *SafeContext) ProtoBufAndDone(code int, obj any){
c.Context.ProtoBuf(code,obj)
c.Done()
}
func (c *SafeContext) StringAndDone(code int, format string, values ...any){
c.Context.String(code,format,values...)
c.Done()
}
func (c *SafeContext) RedirectAndDone(code int, location string){
c.Context.Redirect(code,location)
c.Done()
}
func (c *SafeContext) DataAndDone(code int, contentType string, data []byte){
c.Context.Data(code,contentType,data)
c.Done()
}
func (c *SafeContext) DataFromReaderAndDone(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string){
c.DataFromReader(code,contentLength,contentType,reader,extraHeaders)
c.Done()
}
func (c *SafeContext) HTMLAndDone(code int, name string, obj any){
c.Context.HTML(code,name,obj)
c.Done()
}
func (c *SafeContext) IndentedJSONAndDone(code int, obj any){
c.Context.IndentedJSON(code,obj)
c.Done()
}
func (c *SafeContext) SecureJSONAndDone(code int, obj any){
c.Context.SecureJSON(code,obj)
c.Done()
}
func (c *SafeContext) JSONPAndDone(code int, obj any){
c.Context.JSONP(code,obj)
c.Done()
}
func (c *SafeContext) Done(){
c.chanWait <- struct{}{}
}
type GinEvent struct {
handlersChain []SafeHandlerFunc
c SafeContext
}
type SafeHandlerFunc func(*SafeContext)
func (ge *GinEvent) GetEventType() event.EventType {
return event.Sys_Event_Gin_Event
}
func (gm *GinModule) handleMethod(httpMethod, relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.Engine.Handle(httpMethod, relativePath, func(c *gin.Context) {
for _, p := range gm.processor {
_, err := p.Process(c)
if err != nil {
return
}
}
var ev GinEvent
chanWait := make(chan struct{},2)
ev.c.chanWait = chanWait
ev.handlersChain = handlers
ev.c.Context = c
gm.NotifyEvent(&ev)
ctx,cancel := context.WithTimeout(context.Background(), gm.handleTimeout)
defer cancel()
select{
case <-ctx.Done():
log.Error("GinModule process timeout", slog.Any("path", c.Request.URL.Path))
c.AbortWithStatus(http.StatusRequestTimeout)
case <-chanWait:
}
})
}
// GET the callback is handled in the gin goroutine
func (gm *GinModule) GET(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.GET(relativePath, handlers...)
}
// POST the callback is handled in the gin goroutine
func (gm *GinModule) POST(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.POST(relativePath, handlers...)
}
// DELETE the callback is handled in the gin goroutine
func (gm *GinModule) DELETE(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.DELETE(relativePath, handlers...)
}
// PATCH the callback is handled in the gin goroutine
func (gm *GinModule) PATCH(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.PATCH(relativePath, handlers...)
}
// Put the callback is handled in the gin goroutine
func (gm *GinModule) Put(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.PUT(relativePath, handlers...)
}
// SafeGET the callback is handled in the service goroutine
func (gm *GinModule) SafeGET(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodGet, relativePath, handlers...)
}
// SafePOST the callback is handled in the service goroutine
func (gm *GinModule) SafePOST(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPost, relativePath, handlers...)
}
// SafeDELETE the callback is handled in the service goroutine
func (gm *GinModule) SafeDELETE(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodDelete, relativePath, handlers...)
}
// SafePATCH the callback is handled in the service goroutine
func (gm *GinModule) SafePATCH(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPatch, relativePath, handlers...)
}
// SafePut the callback is handled in the service goroutine
func (gm *GinModule) SafePut(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPut, relativePath, handlers...)
}
func GetIPWithProxyHeaders(c *gin.Context) string {
// try to get the real IP from the X-Real-IP header
ip := c.GetHeader("X-Real-IP")
// if the X-Real-IP header is absent, try the X-Forwarded-For header
if ip == "" {
ip = c.GetHeader("X-Forwarded-For")
}
// if neither is present, fall back to the default ClientIP method
if ip == "" {
ip = c.ClientIP()
}
return ip
}
func GetIPWithValidatedProxyHeaders(c *gin.Context) string {
// read the proxy headers
proxyHeaders := c.Request.Header.Get("X-Real-IP,X-Forwarded-For")
// split the proxy header value and take the first IP as the real IP
ips := strings.Split(proxyHeaders, ",")
ip := strings.TrimSpace(ips[0])
// if the IP format is valid, use it; otherwise fall back to the default ClientIP method
if isValidIP(ip) {
return ip
} else {
ip = c.ClientIP()
return ip
}
}
// isValidIP reports whether the IP string has a valid format
func isValidIP(ip string) bool {
// add custom IP format validation logic here
// e.g. validate the IP format with a regular expression
// ...
if ip == "" {
return false
}
return true
}
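A minimal sketch (not part of the diff) of wiring GinModule into a service so that Safe* routes run in the service goroutine; WebService, the route, and the import path are assumptions for illustration.

package web

import (
	"net/http"
	"time"

	"github.com/duanhf2012/origin/v2/service"
	"github.com/duanhf2012/origin/v2/sysmodule/ginmodule" // import path assumed
)

type WebService struct {
	service.Service
	web ginmodule.GinModule
}

func (ws *WebService) OnInit() error {
	// listen address and a 5s handler timeout; passing nil lets OnInit create gin.Default()
	ws.web.Init(":8080", 5*time.Second, nil)
	if _, err := ws.AddModule(&ws.web); err != nil {
		return err
	}
	// the handler runs in the WebService goroutine; the *AndDone reply releases the waiting gin goroutine
	ws.web.SafeGET("/ping", func(c *ginmodule.SafeContext) {
		c.JSONAndDone(http.StatusOK, map[string]string{"msg": "pong"})
	})
	return nil
}

func (ws *WebService) OnStart() {
	ws.web.Start()
}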

View File

@@ -0,0 +1,79 @@
package ginmodule
import (
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/gin-gonic/gin"
"time"
)
// Logger is a custom logging middleware
func Logger() gin.HandlerFunc {
return func(c *gin.Context) {
// capture request info before handling
// start time
startTime := time.Now()
// invoke the remaining handlers for this request
c.Next()
// end time
endTime := time.Now()
// elapsed time
latencyTime := endTime.Sub(startTime)
// client IP
clientIP := c.ClientIP()
// remoteIP := c.RemoteIP()
// request method
reqMethod := c.Request.Method
// request URI
reqUri := c.Request.RequestURI
// request protocol
reqProto := c.Request.Proto
// request referer
repReferer := c.Request.Referer()
// request user agent
reqUA := c.Request.UserAgent()
// response body length
resLength := c.Writer.Size()
if resLength < 0 {
resLength = 0
}
// response status code
statusCode := c.Writer.Status()
log.SDebug(fmt.Sprintf(
"%s | %3d | %s %10s | \033[44;37m%-6s\033[0m %s %s | %10v | \"%s\" \"%s\"",
colorForStatus(statusCode),
statusCode,
colorForStatus(0),
clientIP,
// remoteIP,
reqMethod,
reqUri,
reqProto,
latencyTime,
reqUA,
repReferer,
))
}
}
// colorForStatus returns the ANSI color code for an HTTP status code
func colorForStatus(code int) string {
switch {
case code >= 200 && code < 300:
return "\033[42;1;37m" // green
case code >= 300 && code < 400:
return "\033[34m" // blue
case code >= 400 && code < 500:
return "\033[33m" // yellow
case code == 0:
return "\033[0m" // cancel
default:
return "\033[31m" // red
}
}

View File

@@ -4,13 +4,13 @@ import (
"bytes"
"crypto/tls"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"net/url"
"time"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/service"
)
type HttpClientModule struct {
@@ -43,7 +43,15 @@ func (slf *SyncHttpResponse) Get(timeoutMs int) HttpResponse {
}
}
func (m *HttpClientModule) Init(maxpool int, proxyUrl string) {
func (m *HttpClientModule) InitHttpClient(transport http.RoundTripper,timeout time.Duration,checkRedirect func(req *http.Request, via []*http.Request) error){
m.client = &http.Client{
Transport: transport,
Timeout: timeout,
CheckRedirect: checkRedirect,
}
}
func (m *HttpClientModule) Init(proxyUrl string, maxpool int, dialTimeout time.Duration,dialKeepAlive time.Duration,idleConnTimeout time.Duration,timeout time.Duration) {
type ProxyFun func(_ *http.Request) (*url.URL, error)
var proxyFun ProxyFun
if proxyUrl != "" {
@@ -55,16 +63,16 @@ func (m *HttpClientModule) Init(maxpool int, proxyUrl string) {
m.client = &http.Client{
Transport: &http.Transport{
DialContext: (&net.Dialer{
Timeout: 5 * time.Second,
KeepAlive: 30 * time.Second,
Timeout: dialTimeout,
KeepAlive: dialKeepAlive,
}).DialContext,
MaxIdleConns: maxpool,
MaxIdleConnsPerHost: maxpool,
IdleConnTimeout: 60 * time.Second,
IdleConnTimeout: idleConnTimeout,
Proxy: proxyFun,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
Timeout: 5 * time.Second,
Timeout: timeout,
}
}
@@ -103,7 +111,7 @@ func (m *HttpClientModule) Request(method string, url string, body []byte, heade
}
defer rsp.Body.Close()
ret.Body, err = ioutil.ReadAll(rsp.Body)
ret.Body, err = io.ReadAll(rsp.Body)
if err != nil {
ret.Err = err
return ret
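A minimal sketch (not part of the diff) of the reworked Init, whose timeouts are now parameters instead of hard-coded values; the values mirror the old defaults, and the package name is an assumption.

package httpclientmodule // package name assumed

import "time"

func setupHttpClient(m *HttpClientModule) {
	// no proxy, pool of 100, and the timeouts that used to be hard-coded:
	// 5s dial, 30s keep-alive, 60s idle-conn timeout, 5s request timeout
	m.Init("", 100, 5*time.Second, 30*time.Second, 60*time.Second, 5*time.Second)
}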

View File

@@ -0,0 +1,65 @@
package kafkamodule
import "github.com/IBM/sarama"
type KafkaAdmin struct {
sarama.ClusterAdmin
mapTopic map[string]sarama.TopicDetail
}
func (ka *KafkaAdmin) Setup(kafkaVersion string, addrs []string) error {
config := sarama.NewConfig()
var err error
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
if err != nil {
return err
}
ka.ClusterAdmin, err = sarama.NewClusterAdmin(addrs, config)
if err != nil {
return err
}
ka.mapTopic, err = ka.GetTopics()
if err != nil {
ka.ClusterAdmin.Close()
return err
}
return nil
}
func (ka *KafkaAdmin) RefreshTopic() error {
var err error
ka.mapTopic, err = ka.GetTopics()
return err
}
func (ka *KafkaAdmin) HasTopic(topic string) bool {
_, ok := ka.mapTopic[topic]
return ok
}
func (ka *KafkaAdmin) GetTopicDetail(topic string) *sarama.TopicDetail {
topicDetail, ok := ka.mapTopic[topic]
if ok == false {
return nil
}
return &topicDetail
}
func (ka *KafkaAdmin) GetTopics() (map[string]sarama.TopicDetail, error) {
return ka.ListTopics()
}
// CreateTopic creates a topic
// numPartitions: number of partitions
// replicationFactor: number of replicas
// validateOnly: if true, only validate the parameters without actually creating the topic
func (ka *KafkaAdmin) CreateTopic(topic string, numPartitions int32, replicationFactor int16, validateOnly bool) error {
return ka.ClusterAdmin.CreateTopic(topic, &sarama.TopicDetail{NumPartitions: numPartitions, ReplicationFactor: replicationFactor}, validateOnly)
}
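A minimal sketch (not part of the diff), written as if it sat in package kafkamodule: ensuring a topic exists before producing. The broker list, version, and topic name are placeholders.

package kafkamodule

func ensureTopic(addrs []string) error {
	var admin KafkaAdmin
	if err := admin.Setup("3.3.1", addrs); err != nil {
		return err
	}
	defer admin.Close()
	if !admin.HasTopic("demo_topic") {
		// 3 partitions, replication factor 2; validateOnly=false actually creates it
		return admin.CreateTopic("demo_topic", 3, 2, false)
	}
	return nil
}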

View File

@@ -0,0 +1,289 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"sync"
"time"
)
type ConsumerGroup struct {
sarama.ConsumerGroup
waitGroup sync.WaitGroup
chanExit chan error
ready chan bool
cancel context.CancelFunc
groupId string
}
func NewConsumerConfig(kafkaVersion string, assignor string, offsetsInitial int64) (*sarama.Config, error) {
var err error
config := sarama.NewConfig()
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
config.Consumer.Offsets.Initial = offsetsInitial
config.Consumer.Offsets.AutoCommit.Enable = false
switch assignor {
case "sticky":
// sticky roundRobin: after a rebalance, keep the previous assignments where possible and only peel partitions off the tail
// topic: T0{P0,P1,P2,P3,P4,P5}, consumers: C1,C2
// ---------------before rebalance: i.e. plain roundRobin
// C1: T0{P0} T0{P2} T0{P4}
// C2: T0{P1} T0{P3} T0{P5}
// ----------------after rebalance: one consumer was added
// C1: T0{P0} T0{P2}
// C2: T0{P1} T0{P3}
// C3: T0{P4} T0{P5} until the partition counts per consumer differ by at most 1
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()}
case "roundrobin":
// roundRobin -- distribute partitions one by one across consumers
// topics: T0{P0,P1,P2}, T1{P0,P1,P2,P3}, two consumers C1,C2
// C1: T0{P0} T0{P2} T1{P1} T1{P3}
// C2: T0{P1} T1{P0} T1{P2}
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()}
case "range":
// the default -- distribute each topic's partitions in contiguous ranges
// topics: T0{P0,P1,P2,P3,P4,P5}, T1{P0,P1,P2,P3}, two consumers C1,C2
// T0 has 6 partitions / 2 consumers = 3, i.e. each consumer gets 3 of T0's partitions
// T1 has 4 partitions / 2 consumers = 2, i.e. each consumer gets 2 of T1's partitions
// C1: T0{P0, P1, P2} T1{P0, P1}
// C2: T0{P3, P4, P5} T1{P2, P3}
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()}
default:
return nil, fmt.Errorf("Unrecognized consumer group partition assignor: %s", assignor)
}
if err != nil {
return nil, err
}
return config, nil
}
type IMsgReceiver interface {
Receiver(msgs []*sarama.ConsumerMessage) bool
}
func (c *ConsumerGroup) Setup(addr []string, topics []string, groupId string, config *sarama.Config, receiverInterval time.Duration, maxReceiverNum int, msgReceiver IMsgReceiver) error {
var err error
c.ConsumerGroup, err = sarama.NewConsumerGroup(addr, groupId, config)
if err != nil {
return err
}
c.groupId = groupId
c.chanExit = make(chan error, 1)
var handler ConsumerGroupHandler
handler.receiver = msgReceiver
handler.maxReceiverNum = maxReceiverNum
handler.receiverInterval = receiverInterval
handler.chanExit = c.chanExit
var ctx context.Context
ctx, c.cancel = context.WithCancel(context.Background())
c.waitGroup.Add(1)
go func() {
defer c.waitGroup.Done()
for {
if err = c.Consume(ctx, topics, &handler); err != nil {
// when Setup fails, the error is returned here
log.Error("Error from consumer", log.Any("err", err))
return
}
// check if context was cancelled, signaling that the consumer should stop
if ctx.Err() != nil {
log.Info("consumer stop", log.Any("info", ctx.Err()))
}
c.chanExit <- err
}
}()
err = <-c.chanExit
//the consumer group is ready now
return err
}
func (c *ConsumerGroup) Close() {
log.Info("close consumerGroup")
//1. cancel the context
c.cancel()
//2. close the connection
err := c.ConsumerGroup.Close()
if err != nil {
log.Error("close consumerGroup fail", log.Any("err", err.Error()))
}
//3. wait for the consume goroutine to exit
c.waitGroup.Wait()
}
type ConsumerGroupHandler struct {
receiver IMsgReceiver
receiverInterval time.Duration
maxReceiverNum int
//mapTopicOffset map[string]map[int32]int //map[topic]map[partitionId]offsetInfo
mapTopicData map[string]*MsgData
mx sync.Mutex
chanExit chan error
isRebalance bool //whether this is a rebalance
//stopSig *int32
}
type MsgData struct {
sync.Mutex
msg []*sarama.ConsumerMessage
mapPartitionOffset map[int32]int64
}
func (ch *ConsumerGroupHandler) Flush(session sarama.ConsumerGroupSession, topic string) {
if topic != "" {
msgData := ch.GetMsgData(topic)
msgData.flush(session, ch.receiver, topic)
return
}
for tp, msgData := range ch.mapTopicData {
msgData.flush(session, ch.receiver, tp)
}
}
func (ch *ConsumerGroupHandler) GetMsgData(topic string) *MsgData {
ch.mx.Lock()
defer ch.mx.Unlock()
msgData := ch.mapTopicData[topic]
if msgData == nil {
msgData = &MsgData{}
msgData.msg = make([]*sarama.ConsumerMessage, 0, ch.maxReceiverNum)
ch.mapTopicData[topic] = msgData
}
return msgData
}
func (md *MsgData) flush(session sarama.ConsumerGroupSession, receiver IMsgReceiver, topic string) {
if len(md.msg) == 0 {
return
}
//deliver to the receiver
for {
ok := receiver.Receiver(md.msg)
if ok == true {
break
}
}
for pId, offset := range md.mapPartitionOffset {
session.MarkOffset(topic, pId, offset+1, "")
log.Info(fmt.Sprintf("topic %s,pid %d,offset %d", topic, pId, offset+1))
}
session.Commit()
//log.Info("commit")
//time.Sleep(1000 * time.Second)
//reset the buffer
md.msg = md.msg[:0]
clear(md.mapPartitionOffset)
}
func (md *MsgData) appendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage, receiver IMsgReceiver, maxReceiverNum int) {
md.Lock()
defer md.Unlock()
//received offsets only ever increase
if md.mapPartitionOffset == nil {
md.mapPartitionOffset = make(map[int32]int64, 10)
}
md.mapPartitionOffset[msg.Partition] = msg.Offset
md.msg = append(md.msg, msg)
if len(md.msg) < maxReceiverNum {
return
}
md.flush(session, receiver, msg.Topic)
}
func (ch *ConsumerGroupHandler) AppendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
dataMsg := ch.GetMsgData(msg.Topic)
dataMsg.appendMsg(session, msg, ch.receiver, ch.maxReceiverNum)
}
func (ch *ConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error {
ch.mapTopicData = make(map[string]*MsgData, 128)
if ch.isRebalance == false {
ch.chanExit <- nil
}
ch.isRebalance = true
return nil
}
func (ch *ConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error {
ch.Flush(session, "")
return nil
}
func (ch *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
ticker := time.NewTicker(ch.receiverInterval)
for {
select {
case msg := <-claim.Messages():
if msg == nil {
log.SWarning("claim will exit", log.Any("topic", claim.Topic()), log.Any("Partition", claim.Partition()))
return nil
}
ch.AppendMsg(session, msg)
case <-ticker.C:
ch.Flush(session, claim.Topic())
case <-session.Context().Done():
return nil
}
}
}
/*
Aliyun parameter notes: https://sls.aliyun.com/doc/oscompatibledemo/sarama_go_kafka_consume.html
conf.Net.TLS.Enable = true
conf.Net.SASL.Enable = true
conf.Net.SASL.User = project
conf.Net.SASL.Password = fmt.Sprintf("%s#%s", accessId, accessKey)
conf.Net.SASL.Mechanism = "PLAIN"
conf.Consumer.Fetch.Min = 1
conf.Consumer.Fetch.Default = 1024 * 1024
conf.Consumer.Retry.Backoff = 2 * time.Second
conf.Consumer.MaxWaitTime = 250 * time.Millisecond
conf.Consumer.MaxProcessingTime = 100 * time.Millisecond
conf.Consumer.Return.Errors = false
conf.Consumer.Offsets.AutoCommit.Enable = true
conf.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
conf.Consumer.Offsets.Initial = sarama.OffsetOldest
conf.Consumer.Offsets.Retry.Max = 3
*/
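A minimal sketch (not part of the diff), written as if it sat in package kafkamodule: a consumer group using the "range" assignor described above, delivering batches of up to 100 messages or every 2 seconds. Addresses, topic, and group id are placeholders.

package kafkamodule

import (
	"time"

	"github.com/IBM/sarama"
	"github.com/duanhf2012/origin/v2/log"
)

type printReceiver struct{}

func (printReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
	for _, m := range msgs {
		log.Info("kafka msg", log.Any("topic", m.Topic), log.Any("offset", m.Offset))
	}
	return true // returning false makes the handler retry the same batch
}

func runConsumer(addrs []string) (*ConsumerGroup, error) {
	cfg, err := NewConsumerConfig("3.3.1", "range", sarama.OffsetNewest)
	if err != nil {
		return nil, err
	}
	var cg ConsumerGroup
	// Setup blocks until the first rebalance completes (or an error occurs)
	err = cg.Setup(addrs, []string{"demo_topic"}, "demo_group", cfg, 2*time.Second, 100, printReceiver{})
	return &cg, err
}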

View File

@@ -0,0 +1,146 @@
package kafkamodule
import (
"context"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"time"
)
type IProducer interface {
}
type SyncProducer struct {
}
type AsyncProducer struct {
}
type Producer struct {
service.Module
sarama.SyncProducer
sarama.AsyncProducer
}
// NewProducerConfig creates a producer config
// kafkaVersion: kafka version
// returnErr / returnSucc: whether errors and successes are returned on the corresponding channels
// requiredAcks: -1 waits for all in-sync replicas, strongest reliability (WaitForAll); 1 waits for the leader only, the default (WaitForLocal); 0 waits for no ack, highest throughput (NoResponse)
// Idempotent: idempotent producer; when set to true the producer guarantees messages are written exactly once and in order
// partitioner: partitioner constructor used to choose the target partition; by default the message key is hashed
func NewProducerConfig(kafkaVersion string, returnErr bool, returnSucc bool, requiredAcks sarama.RequiredAcks, Idempotent bool,
partitioner sarama.PartitionerConstructor) (*sarama.Config, error) {
config := sarama.NewConfig()
var err error
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
if err != nil {
return nil, err
}
config.Producer.Return.Errors = returnErr
config.Producer.Return.Successes = returnSucc
config.Producer.RequiredAcks = requiredAcks
config.Producer.Partitioner = partitioner
config.Producer.Timeout = 10 * time.Second
config.Producer.Idempotent = Idempotent
if Idempotent == true {
config.Net.MaxOpenRequests = 1
}
return config, nil
}
func (p *Producer) SyncSetup(addr []string, config *sarama.Config) error {
var err error
p.SyncProducer, err = sarama.NewSyncProducer(addr, config)
if err != nil {
return err
}
return nil
}
func (p *Producer) ASyncSetup(addr []string, config *sarama.Config) error {
var err error
p.AsyncProducer, err = sarama.NewAsyncProducer(addr, config)
if err != nil {
return err
}
go func() {
p.asyncRun()
}()
return nil
}
func (p *Producer) asyncRun() {
for {
select {
case sm := <-p.Successes():
if sm.Metadata == nil {
break
}
asyncReturn := sm.Metadata.(*AsyncReturn)
asyncReturn.chanReturn <- asyncReturn
case em := <-p.Errors():
log.Error("async kafkamodule error", log.ErrorAttr("err", em.Err))
if em.Msg.Metadata == nil {
break
}
asyncReturn := em.Msg.Metadata.(*AsyncReturn)
asyncReturn.Err = em.Err
asyncReturn.chanReturn <- asyncReturn
}
}
}
type AsyncReturn struct {
Msg *sarama.ProducerMessage
Err error
chanReturn chan *AsyncReturn
}
func (ar *AsyncReturn) WaitOk(ctx context.Context) (*sarama.ProducerMessage, error) {
asyncReturn := ar.Msg.Metadata.(*AsyncReturn)
select {
case <-asyncReturn.chanReturn:
return asyncReturn.Msg, asyncReturn.Err
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (p *Producer) AsyncSendMessage(msg *sarama.ProducerMessage) *AsyncReturn {
asyncReturn := AsyncReturn{Msg: msg, chanReturn: make(chan *AsyncReturn, 1)}
msg.Metadata = &asyncReturn
p.AsyncProducer.Input() <- msg
return &asyncReturn
}
func (p *Producer) AsyncPushMessage(msg *sarama.ProducerMessage) {
p.AsyncProducer.Input() <- msg
}
func (p *Producer) Close() {
if p.SyncProducer != nil {
p.SyncProducer.Close()
p.SyncProducer = nil
}
if p.AsyncProducer != nil {
p.AsyncProducer.Close()
p.AsyncProducer = nil
}
}
func (p *Producer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
return p.SyncProducer.SendMessage(msg)
}
func (p *Producer) SendMessages(msgs []*sarama.ProducerMessage) error {
return p.SyncProducer.SendMessages(msgs)
}
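A minimal sketch (not part of the diff), written as if it sat in package kafkamodule: a synchronous producer configured with WaitForAll acks as discussed in the comment above; the broker list and topic are placeholders.

package kafkamodule

import "github.com/IBM/sarama"

func produceOnce(addrs []string) error {
	cfg, err := NewProducerConfig("3.3.1", true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
	if err != nil {
		return err
	}
	var p Producer
	if err = p.SyncSetup(addrs, cfg); err != nil {
		return err
	}
	defer p.Close()
	// partition and offset of the stored message are returned on success
	_, _, err = p.SendMessage(&sarama.ProducerMessage{
		Topic: "demo_topic",
		Key:   sarama.StringEncoder("k1"),
		Value: sarama.StringEncoder("hello"),
	})
	return err
}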

View File

@@ -0,0 +1,151 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"testing"
"time"
)
// notes on the parameters and assignor names: https://blog.csdn.net/u013311345/article/details/129217728
type MsgReceiver struct {
t *testing.T
}
func (mr *MsgReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
for _, m := range msgs {
mr.t.Logf("time:%s, topic:%s, partition:%d, offset:%d, key:%s, value:%s", time.Now().Format("2006-01-02 15:04:05.000"), m.Topic, m.Partition, m.Offset, m.Key, string(m.Value))
}
return true
}
var addr = []string{"192.168.13.24:9092", "192.168.13.24:9093", "192.168.13.24:9094", "192.168.13.24:9095"}
var topicName = []string{"test_topic_1", "test_topic_2"}
var kafkaVersion = "3.3.1"
func producer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 2, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.SyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
//msgs := make([]*sarama.ProducerMessage, 0, 20000)
for i := 0; i < 20000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
pd.SendMessage(&msg)
//msgs = append(msgs, &msg)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
t.Log(time.Now().Sub(now).Milliseconds())
pd.Close()
}
func producer_async(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 10, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.ASyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
msgs := make([]*AsyncReturn, 0, 20000)
for i := 0; i < 200000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
r := pd.AsyncSendMessage(&msg)
msgs = append(msgs, r)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
for _, r := range msgs {
r.WaitOk(context.Background())
//t.Log(m, e)
}
t.Log(time.Now().Sub(now).Milliseconds())
time.Sleep(1000 * time.Second)
pd.Close()
}
func consumer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 10, 2, false)
t.Log(err)
}
}
var cg ConsumerGroup
cfg, err := NewConsumerConfig(kafkaVersion, "sticky", sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
err = cg.Setup(addr, topicName, "test_groupId", cfg, 50*time.Second, 10, &MsgReceiver{t: t})
t.Log(err)
time.Sleep(10000 * time.Second)
cg.Close()
}
func TestConsumerAndProducer(t *testing.T) {
producer_async(t)
//go producer(t)
//producer(t)
//consumer(t)
}

View File

@@ -0,0 +1,7 @@
package kafkamodule
type Sasl struct {
UserName string `json:"UserName"`
Passwd string `json:"Passwd"`
InstanceId string `json:"InstanceId"`
}

View File

@@ -52,10 +52,10 @@ func (mm *MongoModule) TakeSession() Session {
return Session{Client: mm.client, maxOperatorTimeOut: mm.maxOperatorTimeOut}
}
func (s *Session) CountDocument(db string, collection string) (int64, error) {
func (s *Session) CountDocument(db string, collection string, filter interface{}) (int64, error) {
ctxTimeout, cancel := s.GetDefaultContext()
defer cancel()
return s.Database(db).Collection(collection).CountDocuments(ctxTimeout, bson.D{})
return s.Database(db).Collection(collection).CountDocuments(ctxTimeout, filter)
}
func (s *Session) NextSeq(db string, collection string, id interface{}) (int, error) {
@@ -68,34 +68,39 @@ func (s *Session) NextSeq(db string, collection string, id interface{}) (int, er
after := options.After
updateOpts := options.FindOneAndUpdateOptions{ReturnDocument: &after}
err := s.Client.Database(db).Collection(collection).FindOneAndUpdate(ctxTimeout, bson.M{"_id": id}, bson.M{"$inc": bson.M{"Seq": 1}},&updateOpts).Decode(&res)
err := s.Client.Database(db).Collection(collection).FindOneAndUpdate(ctxTimeout, bson.M{"_id": id}, bson.M{"$inc": bson.M{"Seq": 1}}, &updateOpts).Decode(&res)
return res.Seq, err
}
//indexKeys[index][key fields of each index]
func (s *Session) EnsureIndex(db string, collection string, indexKeys [][]string, bBackground bool,sparse bool) error {
return s.ensureIndex(db, collection, indexKeys, bBackground, false,sparse)
// indexKeys[index][key fields of each index]
func (s *Session) EnsureIndex(db string, collection string, indexKeys [][]string, bBackground bool, sparse bool, asc bool) error {
return s.ensureIndex(db, collection, indexKeys, bBackground, false, sparse, asc)
}
//indexKeys[index][key fields of each index]
func (s *Session) EnsureUniqueIndex(db string, collection string, indexKeys [][]string, bBackground bool,sparse bool) error {
return s.ensureIndex(db, collection, indexKeys, bBackground, true,sparse)
// indexKeys[index][key fields of each index]
func (s *Session) EnsureUniqueIndex(db string, collection string, indexKeys [][]string, bBackground bool, sparse bool, asc bool) error {
return s.ensureIndex(db, collection, indexKeys, bBackground, true, sparse, asc)
}
//keys[index][key fields of each index]
func (s *Session) ensureIndex(db string, collection string, indexKeys [][]string, bBackground bool, unique bool,sparse bool) error {
// keys[index][key fields of each index]
func (s *Session) ensureIndex(db string, collection string, indexKeys [][]string, bBackground bool, unique bool, sparse bool, asc bool) error {
var indexes []mongo.IndexModel
for _, keys := range indexKeys {
keysDoc := bsonx.Doc{}
for _, key := range keys {
keysDoc = keysDoc.Append(key, bsonx.Int32(1))
if asc {
keysDoc = keysDoc.Append(key, bsonx.Int32(1))
} else {
keysDoc = keysDoc.Append(key, bsonx.Int32(-1))
}
}
options:= options.Index().SetUnique(unique).SetBackground(bBackground)
options := options.Index().SetUnique(unique).SetBackground(bBackground)
if sparse == true {
options.SetSparse(true)
}
indexes = append(indexes, mongo.IndexModel{Keys: keysDoc, Options:options })
indexes = append(indexes, mongo.IndexModel{Keys: keysDoc, Options: options})
}
ctxTimeout, cancel := context.WithTimeout(context.Background(), s.maxOperatorTimeOut)
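A minimal sketch (not part of the diff), assumed to sit in the same package as the module: building indexes with the new asc flag; database, collection, and field names are placeholders.

func ensureIndexes(mm *MongoModule) error {
	s := mm.TakeSession()
	// unique ascending index on uid, built in the background, not sparse
	if err := s.EnsureUniqueIndex("game", "player", [][]string{{"uid"}}, true, false, true); err != nil {
		return err
	}
	// descending compound index on (zone, score) for leaderboard-style queries
	return s.EnsureIndex("game", "player", [][]string{{"zone", "score"}}, true, false, false)
}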

View File

@@ -1,181 +0,0 @@
package mongomodule
import (
"github.com/duanhf2012/origin/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"sync"
"time"
"container/heap"
_ "gopkg.in/mgo.v2"
)
// session
type Session struct {
*mgo.Session
ref int
index int
}
type SessionHeap []*Session
func (h SessionHeap) Len() int {
return len(h)
}
func (h SessionHeap) Less(i, j int) bool {
return h[i].ref < h[j].ref
}
func (h SessionHeap) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
h[i].index = i
h[j].index = j
}
func (h *SessionHeap) Push(s interface{}) {
s.(*Session).index = len(*h)
*h = append(*h, s.(*Session))
}
func (h *SessionHeap) Pop() interface{} {
l := len(*h)
s := (*h)[l-1]
s.index = -1
*h = (*h)[:l-1]
return s
}
type DialContext struct {
sync.Mutex
sessions SessionHeap
}
type MongoModule struct {
dailContext *DialContext
}
func (slf *MongoModule) Init(url string,sessionNum uint32,dialTimeout time.Duration, timeout time.Duration) error {
var err error
slf.dailContext, err = dialWithTimeout(url, sessionNum, dialTimeout, timeout)
return err
}
func (slf *MongoModule) Ref() *Session{
return slf.dailContext.Ref()
}
func (slf *MongoModule) UnRef(s *Session) {
slf.dailContext.UnRef(s)
}
// goroutine safe
func dialWithTimeout(url string, sessionNum uint32, dialTimeout time.Duration, timeout time.Duration) (*DialContext, error) {
if sessionNum <= 0 {
sessionNum = 100
log.Release("invalid sessionNum, reset to %v", sessionNum)
}
s, err := mgo.DialWithTimeout(url, dialTimeout)
if err != nil {
return nil, err
}
s.SetMode(mgo.Strong,true)
s.SetSyncTimeout(timeout)
s.SetSocketTimeout(timeout)
c := new(DialContext)
// sessions
c.sessions = make(SessionHeap, sessionNum)
c.sessions[0] = &Session{s, 0, 0}
for i := 1; i < int(sessionNum); i++ {
c.sessions[i] = &Session{s.New(), 0, i}
}
heap.Init(&c.sessions)
return c, nil
}
// goroutine safe
func (c *DialContext) Close() {
c.Lock()
for _, s := range c.sessions {
s.Close()
}
c.Unlock()
}
// goroutine safe
func (c *DialContext) Ref() *Session {
c.Lock()
s := c.sessions[0]
if s.ref == 0 {
s.Refresh()
}
s.ref++
heap.Fix(&c.sessions, 0)
c.Unlock()
return s
}
// goroutine safe
func (c *DialContext) UnRef(s *Session) {
if s == nil {
return
}
c.Lock()
s.ref--
heap.Fix(&c.sessions, s.index)
c.Unlock()
}
// goroutine safe
func (s *Session) EnsureCounter(db string, collection string, id string) error {
err := s.DB(db).C(collection).Insert(bson.M{
"_id": id,
"seq": 0,
})
if mgo.IsDup(err) {
return nil
} else {
return err
}
}
// goroutine safe
func (s *Session) NextSeq(db string, collection string, id string) (int, error) {
var res struct {
Seq int
}
_, err := s.DB(db).C(collection).FindId(id).Apply(mgo.Change{
Update: bson.M{"$inc": bson.M{"seq": 1}},
ReturnNew: true,
}, &res)
return res.Seq, err
}
// goroutine safe
func (s *Session) EnsureIndex(db string, collection string, key []string, bBackground bool) error {
return s.DB(db).C(collection).EnsureIndex(mgo.Index{
Key: key,
Unique: false,
Sparse: true,
Background: bBackground,
})
}
// goroutine safe
func (s *Session) EnsureUniqueIndex(db string, collection string, key []string, bBackground bool) error {
return s.DB(db).C(collection).EnsureIndex(mgo.Index{
Key: key,
Unique: true,
Sparse: true,
Background: bBackground,
})
}

View File

@@ -1,95 +0,0 @@
package mongomodule
import (
"fmt"
_ "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"testing"
"time"
)
type Student struct {
ID bson.ObjectId `bson:"_id"`
Name string `bson: "name"`
Age int `bson: "age"`
Sid string `bson: "sid"`
Status int `bson: "status"`
}
type StudentName struct {
Name string `bson: "name"`
}
func Test_Example(t *testing.T) {
module:=MongoModule{}
module.Init("mongodb://admin:123456@192.168.2.119:27017",100, 5*time.Second,5*time.Second)
// take session
s := module.Take()
c := s.DB("test2").C("t_student")
//2. define the objects
insertData := Student{
ID:bson.NewObjectId(),
Name: "seeta11",
Age: 35, //*^_^*
Sid: "s20180907",
Status: 1,
}
updateData := Student{
Name: "seeta11",
Age: 18,
Sid: "s20180907",
Status: 1,
}
//3. insert data
err := c.Insert(&insertData)
//4. query data
selector := bson.M{"_id":bson.ObjectIdHex("5f25303e999c622d361989b0")}
m:=Student{}
err = c.Find(selector).One(&m)
//5. update data
//selector2 := bson.M{"_id":bson.ObjectIdHex("5f25303e999c622d361989b0")}
updateData.ID = bson.ObjectIdHex("5f25303e999c622d361989b0")
err = c.UpdateId(bson.ObjectIdHex("5f25303e999c622d361989b0"),&updateData)
if err != nil {
fmt.Print(err)
}
//6. delete data
err = c.RemoveId(bson.ObjectIdHex("5f252f09999c622d36198951"))
if err != nil {
fmt.Print(err)
}
//7. auto-increment the sequence
s.EnsureCounter("test2","t_student","5f252f09999c622d36198951")
for i := 0; i < 3; i++ {
id, err := s.NextSeq("test2", "t_student", "5f252f09999c622d36198951")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(id)
}
//8. using $setOnInsert
info,uErr := c.Upsert(bson.M{"_id":bson.ObjectIdHex("5f252f09999c622d36198951")},bson.M{
"$setOnInsert":bson.M{"Name":"setoninsert","Age":55}})
//9. update part of the numeric fields
selector1 := bson.M{"_id":bson.ObjectIdHex("60473de655f1012e7453b369")}
update1 := bson.M{"$set":bson.M{"name":"xxxxx","age":1111}}
c.Update(selector1,update1)
fmt.Println(info,uErr)
}

View File

@@ -1,10 +1,10 @@
package mysqlmondule
package mysqlmodule
import (
"database/sql"
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"net/url"
"reflect"
"strconv"
@@ -12,7 +12,7 @@ import (
"sync"
"time"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/service"
_ "github.com/go-sql-driver/mysql"
)

View File

@@ -1,4 +1,4 @@
package mysqlmondule
package mysqlmodule
import (
"testing"

View File

@@ -5,11 +5,11 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"strconv"
"time"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/v2/service"
"github.com/gomodule/redigo/redis"
)
@@ -42,17 +42,17 @@ type RedisModule struct {
// ConfigRedis 服务器配置
type ConfigRedis struct {
IP string
Port int
Password string
DbIndex int
MaxIdle int //maximum number of idle connections: up to N idle connections are kept alive and ready even when there is no Redis traffic
MaxActive int //maximum number of active connections allowed at the same time
IdleTimeout int //maximum idle time; idle connections exceeding it are closed
IP string
Port int
Password string
DbIndex int
MaxIdle int //maximum number of idle connections: up to N idle connections are kept alive and ready even when there is no Redis traffic
MaxActive int //maximum number of active connections allowed at the same time
IdleTimeout int //maximum idle time; idle connections exceeding it are closed
}
func (m *RedisModule) Init(redisCfg *ConfigRedis) {
redisServer := fmt.Sprintf("%s:%d",redisCfg.IP, redisCfg.Port)
redisServer := fmt.Sprintf("%s:%d", redisCfg.IP, redisCfg.Port)
m.redisPool = &redis.Pool{
Wait: true,
MaxIdle: redisCfg.MaxIdle,
@@ -192,7 +192,6 @@ func (m *RedisModule) HSetStruct(key string, val interface{}) error {
}
defer conn.Close()
_, err = conn.Do("HSET", redis.Args{}.Add(key).AddFlat(val)...)
if err != nil {
return err
@@ -254,11 +253,11 @@ func (m *RedisModule) setMuchStringByExpire(mapInfo map[interface{}]interface{},
}
}
if serr!=nil {
if serr != nil {
log.Error("setMuchStringByExpire fail,reason:%v", serr)
conn.Do("DISCARD")
return serr
}else{
} else {
_, err = conn.Do("EXEC")
}
@@ -287,7 +286,7 @@ func (m *RedisModule) GetString(key interface{}) (string, error) {
return "", err
}
return redis.String(ret,nil)
return redis.String(ret, nil)
}
func (m *RedisModule) GetStringJSON(key string, st interface{}) error {
@@ -345,7 +344,7 @@ func (m *RedisModule) GetStringMap(keys []string) (retMap map[string]string, err
if err != nil {
log.Error("GetMuchString fail,reason:%v", err)
conn.Do("DISCARD")
return nil,err
return nil, err
}
}
@@ -442,7 +441,7 @@ func (m *RedisModule) DelStringKeyList(keys []interface{}) (map[interface{}]bool
if err != nil {
log.Error("DelMuchString fail,reason:%v", err)
conn.Do("DISCARD")
return nil,err
return nil, err
}
}
// 执行命令
@@ -491,7 +490,7 @@ func (m *RedisModule) SetHash(redisKey, hashKey, value interface{}) error {
return retErr
}
//GetRedisAllHashJSON ...
// GetRedisAllHashJSON ...
func (m *RedisModule) GetAllHashJSON(redisKey string) (map[string]string, error) {
if redisKey == "" {
return nil, errors.New("Key Is Empty")
@@ -531,7 +530,7 @@ func (m *RedisModule) GetHash(redisKey interface{}, fieldKey interface{}) (strin
return "", errors.New("Reids Get Hash nil")
}
return redis.String(value,nil)
return redis.String(value, nil)
}
func (m *RedisModule) GetMuchHash(args ...interface{}) ([]string, error) {
@@ -556,7 +555,7 @@ func (m *RedisModule) GetMuchHash(args ...interface{}) ([]string, error) {
valueList := value.([]interface{})
retList := []string{}
for _, valueItem := range valueList{
for _, valueItem := range valueList {
valueByte, ok := valueItem.([]byte)
if !ok {
retList = append(retList, "")
@@ -618,8 +617,8 @@ func (m *RedisModule) SetHashMapJSON(redisKey string, mapFieldValue map[interfac
for symbol, val := range mapFieldValue {
temp, err := json.Marshal(val)
if err == nil {
_,err = conn.Do("HSET", redisKey, symbol, temp)
if err!=nil {
_, err = conn.Do("HSET", redisKey, symbol, temp)
if err != nil {
log.Error("SetMuchHashJSON fail,reason:%v", err)
conn.Send("DISCARD")
return err
@@ -650,25 +649,25 @@ func (m *RedisModule) DelHash(args ...interface{}) error {
}
func (m *RedisModule) LPushList(args ...interface{}) error {
err := m.setListPush("LPUSH",args...)
err := m.setListPush("LPUSH", args...)
return err
}
func (m *RedisModule) LPushListJSON(key interface{}, value ...interface{}) error {
return m.setListJSONPush("LPUSH",key,value...)
return m.setListJSONPush("LPUSH", key, value...)
}
func (m *RedisModule) RPushList(args ...interface{}) error {
err := m.setListPush("RPUSH",args...)
err := m.setListPush("RPUSH", args...)
return err
}
func (m *RedisModule) RPushListJSON(key interface{}, value ...interface{}) error {
return m.setListJSONPush("RPUSH",key,value...)
return m.setListJSONPush("RPUSH", key, value...)
}
//LPUSH and RPUSH
func (m *RedisModule) setListPush(setType string,args...interface{}) error {
// LPUSH and RPUSH
func (m *RedisModule) setListPush(setType string, args ...interface{}) error {
if setType != "LPUSH" && setType != "RPUSH" {
return errors.New("Redis List Push Type Error,Must Be LPUSH or RPUSH")
}
@@ -685,17 +684,17 @@ func (m *RedisModule) setListPush(setType string,args...interface{}) error {
return retErr
}
func (m *RedisModule) setListJSONPush(setType string,key interface{}, value ...interface{}) error {
func (m *RedisModule) setListJSONPush(setType string, key interface{}, value ...interface{}) error {
args := []interface{}{key}
for _,v := range value{
for _, v := range value {
jData, err := json.Marshal(v)
if err != nil {
return err
}
args = append(args,string(jData))
args = append(args, string(jData))
}
return m.setListPush(setType,args...)
return m.setListPush(setType, args...)
}
// Lrange ...
@@ -715,7 +714,7 @@ func (m *RedisModule) LRangeList(key string, start, end int) ([]string, error) {
return redis.Strings(reply, err)
}
//Get the length of a list
// Get the length of a list
func (m *RedisModule) GetListLen(key string) (int, error) {
conn, err := m.getConn()
if err != nil {
@@ -731,11 +730,11 @@ func (m *RedisModule) GetListLen(key string) (int, error) {
return redis.Int(reply, err)
}
//Pop the last element of a list
func (m *RedisModule) RPOPListValue(key string) (string,error) {
// Pop the last element of a list
func (m *RedisModule) RPOPListValue(key string) (string, error) {
conn, err := m.getConn()
if err != nil {
return "",err
return "", err
}
defer conn.Close()
@@ -783,7 +782,7 @@ func (m *RedisModule) LRange(key string, start, stop int) ([]byte, error) {
return makeListJson(reply.([]interface{}), false), nil
}
//Pop data (message queue) from a list into out; fromLeft chooses the side to pop from, block controls blocking, timeout is the blocking timeout
// Pop data (message queue) from a list into out; fromLeft chooses the side to pop from, block controls blocking, timeout is the blocking timeout
func (m *RedisModule) ListPopJson(key string, fromLeft, block bool, timeout int, out interface{}) error {
b, err := m.ListPop(key, fromLeft, block, timeout)
if err != nil {
@@ -796,7 +795,7 @@ func (m *RedisModule) ListPopJson(key string, fromLeft, block bool, timeout int,
return nil
}
//Pop data (message queue) from a list; fromLeft chooses the side to pop from, block controls blocking, timeout is the blocking timeout
// Pop data (message queue) from a list; fromLeft chooses the side to pop from, block controls blocking, timeout is the blocking timeout
func (m *RedisModule) ListPop(key string, fromLeft, block bool, timeout int) ([]byte, error) {
cmd := ""
if fromLeft {
@@ -838,7 +837,7 @@ func (m *RedisModule) ListPop(key string, fromLeft, block bool, timeout int) ([]
return b, nil
}
//Insert JSON into a sorted set
// Insert JSON into a sorted set
func (m *RedisModule) ZADDInsertJson(key string, score float64, value interface{}) error {
conn, err := m.getConn()
@@ -858,7 +857,7 @@ func (m *RedisModule) ZADDInsertJson(key string, score float64, value interface{
return nil
}
//Insert into a sorted set
// Insert into a sorted set
func (m *RedisModule) ZADDInsert(key string, score float64, Data interface{}) error {
conn, err := m.getConn()
if err != nil {
@@ -898,7 +897,7 @@ func (m *RedisModule) ZRangeJSON(key string, start, stop int, ascend bool, withS
return nil
}
//Get members of a sorted set by rank range; ascend=true iterates in ascending order, otherwise descending
// Get members of a sorted set by rank range; ascend=true iterates in ascending order, otherwise descending
func (m *RedisModule) ZRange(key string, start, stop int, ascend bool, withScores bool) ([]byte, error) {
conn, err := m.getConn()
if err != nil {
@@ -922,7 +921,7 @@ func (m *RedisModule) ZRange(key string, start, stop int, ascend bool, withScore
return makeListJson(reply.([]interface{}), withScores), nil
}
//Get the length of a sorted set
// Get the length of a sorted set
func (m *RedisModule) Zcard(key string) (int, error) {
conn, err := m.getConn()
if err != nil {
@@ -937,7 +936,7 @@ func (m *RedisModule) Zcard(key string) (int, error) {
return int(reply.(int64)), nil
}
//["123","234"]
// ["123","234"]
func makeListJson(redisReply []interface{}, withScores bool) []byte {
var buf bytes.Buffer
buf.WriteString("[")
@@ -1006,7 +1005,7 @@ func (m *RedisModule) ZRangeByScore(key string, start, stop float64, ascend bool
return makeListJson(reply.([]interface{}), withScores), nil
}
//Get the score of the given member
// Get the score of the given member
func (m *RedisModule) ZScore(key string, member interface{}) (float64, error) {
conn, err := m.getConn()
if err != nil {
@@ -1022,7 +1021,7 @@ func (m *RedisModule) ZScore(key string, member interface{}) (float64, error) {
return redis.Float64(reply, err)
}
//Get the rank of the given member
// Get the rank of the given member
func (m *RedisModule) ZRank(key string, member interface{}, ascend bool) (int, error) {
conn, err := m.getConn()
if err != nil {
@@ -1100,17 +1099,17 @@ func (m *RedisModule) HincrbyHashInt(redisKey, hashKey string, value int) error
func (m *RedisModule) EXPlREInsert(key string, TTl int) error {
conn, err := m.getConn()
if err != nil {
return err
return err
}
defer conn.Close()
_, err = conn.Do("expire", key, TTl)
if err != nil {
log.Error("expire fail,reason:%v", err)
return err
log.Error("expire fail,reason:%v", err)
return err
}
return nil
}
}
func (m *RedisModule) Zremrangebyrank(redisKey string, start, end interface{}) (int, error) {
conn, err := m.getConn()
@@ -1151,3 +1150,9 @@ func (m *RedisModule) Keys(key string) ([]string, error) {
}
return strs, nil
}
func (m *RedisModule) OnRelease() {
if m.redisPool != nil {
m.redisPool.Close()
}
}

View File

@@ -2,13 +2,12 @@ package httpservice
import (
"fmt"
"github.com/duanhf2012/origin/event"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/util/uuid"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/network"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/util/uuid"
jsoniter "github.com/json-iterator/go"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
@@ -17,13 +16,13 @@ import (
var json = jsoniter.ConfigCompatibleWithStandardLibrary
var DefaultReadTimeout time.Duration = time.Second*10
var DefaultWriteTimeout time.Duration = time.Second*10
var DefaultProcessTimeout time.Duration = time.Second*10
var DefaultReadTimeout time.Duration = time.Second * 10
var DefaultWriteTimeout time.Duration = time.Second * 10
var DefaultProcessTimeout time.Duration = time.Second * 10
//http redirect
type HttpRedirectData struct {
Url string
Url string
CookieList []*http.Cookie
}
@@ -44,7 +43,7 @@ type routerMatchData struct {
}
type routerServeFileData struct {
matchUrl string
matchUrl string
localPath string
method HTTP_METHOD
}
@@ -56,45 +55,45 @@ type IHttpRouter interface {
SetServeFile(method HTTP_METHOD, urlpath string, dirname string) error
SetFormFileKey(formFileKey string)
GetFormFileKey()string
GetFormFileKey() string
AddHttpFiltrate(FiltrateFun HttpFiltrate) bool
}
type HttpRouter struct {
pathRouter map[HTTP_METHOD] map[string] routerMatchData //url地址对应本service地址
serveFileData map[string] *routerServeFileData
httpFiltrateList [] HttpFiltrate
pathRouter map[HTTP_METHOD]map[string]routerMatchData //url地址对应本service地址
serveFileData map[string]*routerServeFileData
httpFiltrateList []HttpFiltrate
formFileKey string
}
type HttpSession struct {
httpRouter IHttpRouter
r *http.Request
w http.ResponseWriter
r *http.Request
w http.ResponseWriter
//parse result
mapParam map[string]string
body []byte
body []byte
//processor result
statusCode int
msg []byte
fileData *routerServeFileData
statusCode int
msg []byte
fileData *routerServeFileData
redirectData *HttpRedirectData
sessionDone chan *HttpSession
sessionDone chan *HttpSession
}
type HttpService struct {
service.Service
httpServer network.HttpServer
postAliasUrl map[HTTP_METHOD] map[string]routerMatchData //url地址对应本service地址
httpRouter IHttpRouter
listenAddr string
corsHeader *CORSHeader
httpServer network.HttpServer
postAliasUrl map[HTTP_METHOD]map[string]routerMatchData //url地址对应本service地址
httpRouter IHttpRouter
listenAddr string
corsHeader *CORSHeader
processTimeout time.Duration
manualStart bool
}
type HttpFiltrate func(session *HttpSession) bool //true is pass
@@ -109,16 +108,20 @@ func (httpService *HttpService) AddFiltrate(FiltrateFun HttpFiltrate) bool {
func NewHttpHttpRouter() IHttpRouter {
httpRouter := &HttpRouter{}
httpRouter.pathRouter =map[HTTP_METHOD] map[string] routerMatchData{}
httpRouter.serveFileData = map[string] *routerServeFileData{}
httpRouter.pathRouter = map[HTTP_METHOD]map[string]routerMatchData{}
httpRouter.serveFileData = map[string]*routerServeFileData{}
httpRouter.formFileKey = "file"
for i:=METHOD_NONE+1;i<METHOD_INVALID;i++{
httpRouter.pathRouter[i] = map[string] routerMatchData{}
for i := METHOD_NONE + 1; i < METHOD_INVALID; i++ {
httpRouter.pathRouter[i] = map[string]routerMatchData{}
}
return httpRouter
}
func (slf *HttpSession) GetRawQuery() string{
return slf.r.URL.RawQuery
}
func (slf *HttpSession) Query(key string) (string, bool) {
if slf.mapParam == nil {
slf.mapParam = make(map[string]string)
@@ -137,7 +140,7 @@ func (slf *HttpSession) Query(key string) (string, bool) {
return ret, ok
}
func (slf *HttpSession) GetBody() []byte{
func (slf *HttpSession) GetBody() []byte {
return slf.body
}
@@ -145,19 +148,19 @@ func (slf *HttpSession) GetMethod() HTTP_METHOD {
return slf.getMethod(slf.r.Method)
}
func (slf *HttpSession) GetPath() string{
return strings.Trim(slf.r.URL.Path,"/")
func (slf *HttpSession) GetPath() string {
return strings.Trim(slf.r.URL.Path, "/")
}
func (slf *HttpSession) SetHeader(key, value string) {
slf.w.Header().Set(key,value)
slf.w.Header().Set(key, value)
}
func (slf *HttpSession) AddHeader(key, value string) {
slf.w.Header().Add(key,value)
slf.w.Header().Add(key, value)
}
func (slf *HttpSession) GetHeader(key string) string{
func (slf *HttpSession) GetHeader(key string) string {
return slf.r.Header.Get(key)
}
@@ -165,7 +168,7 @@ func (slf *HttpSession) DelHeader(key string) {
slf.r.Header.Del(key)
}
func (slf *HttpSession) WriteStatusCode(statusCode int){
func (slf *HttpSession) WriteStatusCode(statusCode int) {
slf.statusCode = statusCode
}
@@ -173,7 +176,7 @@ func (slf *HttpSession) Write(msg []byte) {
slf.msg = msg
}
func (slf *HttpSession) WriteJsonDone(statusCode int,msgJson interface{}) error {
func (slf *HttpSession) WriteJsonDone(statusCode int, msgJson interface{}) error {
msg, err := json.Marshal(msgJson)
if err != nil {
return err
@@ -187,12 +190,12 @@ func (slf *HttpSession) WriteJsonDone(statusCode int,msgJson interface{}) error
func (slf *HttpSession) flush() {
slf.w.WriteHeader(slf.statusCode)
if slf.msg!=nil {
if slf.msg != nil {
slf.w.Write(slf.msg)
}
}
func (slf *HttpSession) Done(){
func (slf *HttpSession) Done() {
slf.sessionDone <- slf
}
@@ -219,15 +222,15 @@ func (slf *HttpRouter) analysisRouterUrl(url string) (string, error) {
return strings.Trim(url, "/"), nil
}
func (slf *HttpSession) Handle(){
slf.httpRouter.Router(slf)
func (slf *HttpSession) Handle() {
slf.httpRouter.Router(slf)
}
func (slf *HttpRouter) SetFormFileKey(formFileKey string){
func (slf *HttpRouter) SetFormFileKey(formFileKey string) {
slf.formFileKey = formFileKey
}
func (slf *HttpRouter) GetFormFileKey()string{
func (slf *HttpRouter) GetFormFileKey() string {
return slf.formFileKey
}
@@ -239,19 +242,19 @@ func (slf *HttpRouter) POST(url string, handle HttpHandle) bool {
return slf.regRouter(METHOD_POST, url, handle)
}
func (slf *HttpRouter) regRouter(method HTTP_METHOD, url string, handle HttpHandle) bool{
mapRouter,ok := slf.pathRouter[method]
if ok == false{
func (slf *HttpRouter) regRouter(method HTTP_METHOD, url string, handle HttpHandle) bool {
mapRouter, ok := slf.pathRouter[method]
if ok == false {
return false
}
mapRouter[strings.Trim(url,"/")] = routerMatchData{httpHandle:handle}
mapRouter[strings.Trim(url, "/")] = routerMatchData{httpHandle: handle}
return true
}
func (slf *HttpRouter) Router(session *HttpSession){
if slf.httpFiltrateList!=nil {
for _,fun := range slf.httpFiltrateList{
func (slf *HttpRouter) Router(session *HttpSession) {
if slf.httpFiltrateList != nil {
for _, fun := range slf.httpFiltrateList {
if fun(session) == false {
//session.done()
return
@@ -288,13 +291,13 @@ func (slf *HttpRouter) Router(session *HttpSession){
session.Done()
}
func (httpService *HttpService) HttpEventHandler(ev event.IEvent) {
func (httpService *HttpService) HttpEventHandler(ev event.IEvent) {
ev.(*event.Event).Data.(*HttpSession).Handle()
}
func (httpService *HttpService) SetHttpRouter(httpRouter IHttpRouter,eventHandler event.IEventHandler) {
func (httpService *HttpService) SetHttpRouter(httpRouter IHttpRouter, eventHandler event.IEventHandler) {
httpService.httpRouter = httpRouter
httpService.RegEventReceiverFunc(event.Sys_Event_Http_Event,eventHandler, httpService.HttpEventHandler)
httpService.RegEventReceiverFunc(event.Sys_Event_Http_Event, eventHandler, httpService.HttpEventHandler)
}
func (slf *HttpRouter) SetServeFile(method HTTP_METHOD, urlpath string, dirname string) error {
@@ -349,68 +352,84 @@ func (httpService *HttpService) OnInit() error {
if iConfig == nil {
return fmt.Errorf("%s service config is error!", httpService.GetName())
}
tcpCfg := iConfig.(map[string]interface{})
addr,ok := tcpCfg["ListenAddr"]
httpCfg := iConfig.(map[string]interface{})
addr, ok := httpCfg["ListenAddr"]
if ok == false {
return fmt.Errorf("%s service config is error!", httpService.GetName())
}
var readTimeout time.Duration = DefaultReadTimeout
var writeTimeout time.Duration = DefaultWriteTimeout
if cfgRead,ok := tcpCfg["ReadTimeout"];ok == true {
readTimeout = time.Duration(cfgRead.(float64))*time.Millisecond
if cfgRead, ok := httpCfg["ReadTimeout"]; ok == true {
readTimeout = time.Duration(cfgRead.(float64)) * time.Millisecond
}
if cfgWrite,ok := tcpCfg["WriteTimeout"];ok == true {
writeTimeout = time.Duration(cfgWrite.(float64))*time.Millisecond
if cfgWrite, ok := httpCfg["WriteTimeout"]; ok == true {
writeTimeout = time.Duration(cfgWrite.(float64)) * time.Millisecond
}
if manualStart, ok := httpCfg["ManualStart"]; ok == true {
httpService.manualStart = manualStart.(bool)
}else{
manualStart =false
}
httpService.processTimeout = DefaultProcessTimeout
if cfgProcessTimeout,ok := tcpCfg["ProcessTimeout"];ok == true {
httpService.processTimeout = time.Duration(cfgProcessTimeout.(float64))*time.Millisecond
if cfgProcessTimeout, ok := httpCfg["ProcessTimeout"]; ok == true {
httpService.processTimeout = time.Duration(cfgProcessTimeout.(float64)) * time.Millisecond
}
httpService.httpServer.Init(addr.(string), httpService, readTimeout, writeTimeout)
//Set CAFile
caFileList,ok := tcpCfg["CAFile"]
caFileList, ok := httpCfg["CAFile"]
if ok == false {
return nil
}
iCaList := caFileList.([]interface{})
var caFile [] network.CAFile
for _,i := range iCaList {
var caFile []network.CAFile
for _, i := range iCaList {
mapCAFile := i.(map[string]interface{})
c,ok := mapCAFile["Certfile"]
if ok == false{
c, ok := mapCAFile["Certfile"]
if ok == false {
continue
}
k,ok := mapCAFile["Keyfile"]
if ok == false{
k, ok := mapCAFile["Keyfile"]
if ok == false {
continue
}
if c.(string)!="" && k.(string)!="" {
caFile = append(caFile,network.CAFile{
CertFile: c.(string),
if c.(string) != "" && k.(string) != "" {
caFile = append(caFile, network.CAFile{
CertFile: c.(string),
Keyfile: k.(string),
})
}
}
httpService.httpServer.SetCAFile(caFile)
httpService.httpServer.Start()
if httpService.manualStart == false {
httpService.httpServer.Start()
}
return nil
}
func (httpService *HttpService) StartListen() {
if httpService.manualStart {
httpService.httpServer.Start()
}
}
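A small sketch of how the new ManualStart flag is meant to be used (the config key is taken from OnInit above; the surrounding application code is hypothetical):
//With "ManualStart": true in the service config, OnInit skips httpServer.Start()
//and the application starts listening later, once its own warm-up has finished.
func startWhenReady(httpSvc *HttpService) {
// ... application warm-up ...
httpSvc.StartListen() // no-op unless ManualStart was configured
}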
func (httpService *HttpService) SetAllowCORS(corsHeader *CORSHeader) {
httpService.corsHeader = corsHeader
}
func (httpService *HttpService) ProcessFile(session *HttpSession){
func (httpService *HttpService) ProcessFile(session *HttpSession) {
uPath := session.r.URL.Path
idx := strings.Index(uPath, session.fileData.matchUrl)
subPath := strings.Trim(uPath[idx+len(session.fileData.matchUrl):], "/")
destLocalPath := session.fileData.localPath + "/"+subPath
destLocalPath := session.fileData.localPath + "/" + subPath
switch session.GetMethod() {
case METHOD_GET:
@@ -454,29 +473,29 @@ func (httpService *HttpService) ProcessFile(session *HttpSession){
defer localFd.Close()
io.Copy(localFd, resourceFile)
session.WriteStatusCode(http.StatusOK)
session.Write([]byte(uPath+"/"+fileName))
session.Write([]byte(uPath + "/" + fileName))
session.flush()
}
}
func NewAllowCORSHeader() *CORSHeader{
func NewAllowCORSHeader() *CORSHeader {
header := &CORSHeader{}
header.AllowCORSHeader = map[string][]string{}
header.AllowCORSHeader["Access-Control-Allow-Origin"] = []string{"*"}
header.AllowCORSHeader["Access-Control-Allow-Methods"] =[]string{ "POST, GET, OPTIONS, PUT, DELETE"}
header.AllowCORSHeader["Access-Control-Allow-Methods"] = []string{"POST, GET, OPTIONS, PUT, DELETE"}
header.AllowCORSHeader["Access-Control-Allow-Headers"] = []string{"Content-Type"}
return header
}
func (slf *CORSHeader) AddAllowHeader(key string,val string){
slf.AllowCORSHeader["Access-Control-Allow-Headers"] = append(slf.AllowCORSHeader["Access-Control-Allow-Headers"],fmt.Sprintf("%s,%s",key,val))
func (slf *CORSHeader) AddAllowHeader(key string, val string) {
slf.AllowCORSHeader["Access-Control-Allow-Headers"] = append(slf.AllowCORSHeader["Access-Control-Allow-Headers"], fmt.Sprintf("%s,%s", key, val))
}
func (slf *CORSHeader) copyTo(header http.Header){
for k,v := range slf.AllowCORSHeader{
for _,val := range v{
header.Add(k,val)
func (slf *CORSHeader) copyTo(header http.Header) {
for k, v := range slf.AllowCORSHeader {
for _, val := range v {
header.Add(k, val)
}
}
}
@@ -491,12 +510,12 @@ func (httpService *HttpService) ServeHTTP(w http.ResponseWriter, r *http.Request
return
}
session := &HttpSession{sessionDone:make(chan *HttpSession,1),httpRouter:httpService.httpRouter,statusCode:http.StatusOK}
session := &HttpSession{sessionDone: make(chan *HttpSession, 1), httpRouter: httpService.httpRouter, statusCode: http.StatusOK}
session.r = r
session.w = w
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
body, err := io.ReadAll(r.Body)
if err != nil {
session.WriteStatusCode(http.StatusGatewayTimeout)
session.flush()
@@ -504,19 +523,19 @@ func (httpService *HttpService) ServeHTTP(w http.ResponseWriter, r *http.Request
}
session.body = body
httpService.GetEventHandler().NotifyEvent(&event.Event{Type:event.Sys_Event_Http_Event,Data:session})
httpService.GetEventHandler().NotifyEvent(&event.Event{Type: event.Sys_Event_Http_Event, Data: session})
ticker := time.NewTicker(httpService.processTimeout)
select {
case <-ticker.C:
session.WriteStatusCode(http.StatusGatewayTimeout)
session.flush()
break
case <- session.sessionDone:
if session.fileData!=nil {
case <-session.sessionDone:
if session.fileData != nil {
httpService.ProcessFile(session)
}else if session.redirectData!=nil {
} else if session.redirectData != nil {
session.redirects()
}else{
} else {
session.flush()
}
}

View File

@@ -0,0 +1,231 @@
package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/cluster"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/coroutine"
"strings"
"sync/atomic"
"time"
)
type CustomerSubscriber struct {
rpc.IRpcHandler
topic string
subscriber *Subscriber
fromNodeId string
callBackRpcMethod string
serviceName string
StartIndex uint64
oneBatchQuantity int32
subscribeMethod SubscribeMethod
customerId string
isStop int32 //stop flag
topicCache []TopicData // cache for messages taken out of the message queue
}
const DefaultOneBatchQuantity = 1000
type SubscribeMethod = int32
const (
MethodCustom SubscribeMethod = 0 //custom mode: fetch or subscribe starting from the StartIndex set by the consumer
MethodLast SubscribeMethod = 1 //last mode: subscribe starting from the position last recorded for this consumer
)
func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId string, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
cs.subscriber = ss
cs.fromNodeId = fromNodeId
cs.callBackRpcMethod = callBackRpcMethod
//cs.StartIndex = startIndex
cs.subscribeMethod = subscribeMethod
cs.customerId = customerId
cs.StartIndex = startIndex
cs.topic = topic
cs.IRpcHandler = rpcHandler
if oneBatchQuantity == 0 {
cs.oneBatchQuantity = DefaultOneBatchQuantity
} else {
cs.oneBatchQuantity = oneBatchQuantity
}
strRpcMethod := strings.Split(callBackRpcMethod, ".")
if len(strRpcMethod) != 2 {
err := errors.New("RpcMethod " + callBackRpcMethod + " is error")
log.SError(err.Error())
return err
}
cs.serviceName = strRpcMethod[0]
if cluster.HasService(fromNodeId, cs.serviceName) == false {
err := fmt.Errorf("nodeId %d cannot found %s", fromNodeId, cs.serviceName)
log.SError(err.Error())
return err
}
if cluster.GetCluster().IsNodeConnected(fromNodeId) == false {
err := fmt.Errorf("nodeId %d is disconnect", fromNodeId)
log.SError(err.Error())
return err
}
if startIndex == 0 {
now := time.Now()
zeroTime := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
//fmt.Println(zeroTime.Unix())
cs.StartIndex = uint64(zeroTime.Unix() << 32)
}
cs.topicCache = make([]TopicData, oneBatchQuantity)
return nil
}
// Start the subscription
func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId string, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
err := cs.trySetSubscriberBaseInfo(rpcHandler, ss, topic, subscribeMethod, customerId, fromNodeId, callBackRpcMethod, startIndex, oneBatchQuantity)
if err != nil {
return err
}
cs.subscriber.queueWait.Add(1)
coroutine.GoRecover(cs.SubscribeRun, -1)
return nil
}
// Cancel the subscription
func (cs *CustomerSubscriber) UnSubscribe() {
atomic.StoreInt32(&cs.isStop, 1)
}
func (cs *CustomerSubscriber) LoadLastIndex() {
for {
if atomic.LoadInt32(&cs.isStop) != 0 {
log.Info("topic ", cs.topic, " out of subscription")
break
}
log.Info("customer ", cs.customerId, " start load last index ")
lastIndex, ret := cs.subscriber.dataPersist.LoadCustomerIndex(cs.topic, cs.customerId)
if ret == true {
if lastIndex > 0 {
cs.StartIndex = lastIndex
} else {
//otherwise keep the StartIndex sent back by the client
}
log.Info("customer ", cs.customerId, " load finish,start index is ", cs.StartIndex)
break
}
log.Info("customer ", cs.customerId, " load last index is fail...")
time.Sleep(5 * time.Second)
}
}
func (cs *CustomerSubscriber) SubscribeRun() {
defer cs.subscriber.queueWait.Done()
log.Info("topic ", cs.topic, " start subscription")
//load the previously recorded position
if cs.subscribeMethod == MethodLast {
cs.LoadLastIndex()
}
for {
if atomic.LoadInt32(&cs.isStop) != 0 {
log.Info("topic ", cs.topic, " out of subscription")
break
}
if cs.checkCustomerIsValid() == false {
break
}
//todo: check for exit
if cs.subscribe() == false {
log.Info("topic ", cs.topic, " out of subscription")
break
}
}
//remove the subscription relation
cs.subscriber.removeCustomer(cs.customerId, cs)
log.Info("topic ", cs.topic, " unsubscribed")
}
func (cs *CustomerSubscriber) subscribe() bool {
//look in memory first
topicData, ret := cs.subscriber.queue.FindData(cs.StartIndex+1, cs.oneBatchQuantity, cs.topicCache[:0])
if ret == true {
cs.publishToCustomer(topicData)
return true
}
//fall back to the persisted data
topicData = cs.subscriber.dataPersist.FindTopicData(cs.topic, cs.StartIndex, int64(cs.oneBatchQuantity), cs.topicCache[:0])
return cs.publishToCustomer(topicData)
}
func (cs *CustomerSubscriber) checkCustomerIsValid() bool {
//1. Check whether the node is online; if not, the subscription is cancelled
if cluster.GetCluster().IsNodeConnected(cs.fromNodeId) == false {
return false
}
//2. Verify that the service exists; if not, exit
if cluster.HasService(cs.fromNodeId, cs.serviceName) == false {
return false
}
return true
}
func (cs *CustomerSubscriber) publishToCustomer(topicData []TopicData) bool {
if cs.checkCustomerIsValid() == false {
return false
}
if len(topicData) == 0 {
//no data yet, wait one second
time.Sleep(time.Second * 1)
return true
}
//3. Retry sending on failure
var dbQueuePublishReq rpc.DBQueuePublishReq
var dbQueuePushRes rpc.DBQueuePublishRes
dbQueuePublishReq.TopicName = cs.topic
cs.subscriber.dataPersist.OnPushTopicDataToCustomer(cs.topic, topicData)
for i := 0; i < len(topicData); i++ {
dbQueuePublishReq.PushData = append(dbQueuePublishReq.PushData, topicData[i].RawData)
}
for {
if atomic.LoadInt32(&cs.isStop) != 0 {
break
}
if cs.checkCustomerIsValid() == false {
return false
}
//push the data to the customer
err := cs.CallNodeWithTimeout(4*time.Minute, cs.fromNodeId, cs.callBackRpcMethod, &dbQueuePublishReq, &dbQueuePushRes)
if err != nil {
time.Sleep(time.Second * 1)
continue
}
//persist the progress
endIndex := cs.subscriber.dataPersist.GetIndex(&topicData[len(topicData)-1])
cs.StartIndex = endIndex
cs.subscriber.dataPersist.PersistIndex(cs.topic, cs.customerId, endIndex)
return true
}
return true
}

View File

@@ -0,0 +1,108 @@
package messagequeueservice
import (
"github.com/duanhf2012/origin/v2/util/algorithms"
"sync"
)
type MemoryQueue struct {
subscriber *Subscriber
topicQueue []TopicData
head int32
tail int32
locker sync.RWMutex
}
func (mq *MemoryQueue) Init(cap int32) {
mq.topicQueue = make([]TopicData, cap+1)
}
// Push data at the tail of the queue
func (mq *MemoryQueue) Push(topicData *TopicData) bool {
mq.locker.Lock()
defer mq.locker.Unlock()
nextPos := (mq.tail + 1) % int32(len(mq.topicQueue))
//if the queue is full
if nextPos == mq.head {
//drop the element at the head
mq.head++
mq.head = mq.head % int32(len(mq.topicQueue))
}
mq.tail = nextPos
mq.topicQueue[mq.tail] = *topicData
return true
}
func (mq *MemoryQueue) findData(startPos int32, startIndex uint64, limit int32) ([]TopicData, bool) {
//empty queue, no data
if mq.head == mq.tail {
return nil, true
}
var findStartPos int32
var findEndPos int32
findStartPos = startPos //(mq.head + 1) % cap(mq.topicQueue)
if findStartPos <= mq.tail {
findEndPos = mq.tail + 1
} else {
findEndPos = int32(len(mq.topicQueue))
}
if findStartPos >= findEndPos {
return nil, false
}
// the requested Seq is smaller than the smallest Seq held in memory, so report failure
if mq.topicQueue[findStartPos].Seq > startIndex {
return nil, false
}
//binary search for the position
pos := int32(algorithms.BiSearch(mq.topicQueue[findStartPos:findEndPos], startIndex, 1))
if pos == -1 {
return nil, false
}
pos += findStartPos
//compute the end position
endPos := limit + pos
if endPos > findEndPos {
endPos = findEndPos
}
return mq.topicQueue[pos:endPos], true
}
// FindData returns the data found in memory ([]TopicData, nil means none); the bool reports whether the lookup could be served from memory, false means the caller should query the persistence layer instead
func (mq *MemoryQueue) FindData(startIndex uint64, limit int32, dataQueue []TopicData) ([]TopicData, bool) {
mq.locker.RLock()
defer mq.locker.RUnlock()
//empty queue: the caller should query the database instead
if mq.head == mq.tail {
return nil, false
} else if mq.head < mq.tail {
// the queue has not wrapped around
datas, ret := mq.findData(mq.head+1, startIndex, limit)
if ret {
dataQueue = append(dataQueue, datas...)
}
return dataQueue, ret
} else {
// the queue has wrapped: search the tail segment first
datas, ret := mq.findData(mq.head+1, startIndex, limit)
if ret {
dataQueue = append(dataQueue, datas...)
return dataQueue, ret
}
// nothing in the tail segment, search from the front of the buffer
datas, ret = mq.findData(0, startIndex, limit)
dataQueue = append(dataQueue, datas...)
return dataQueue, ret
}
}
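A usage sketch for FindData with a caller-owned buffer, mirroring how CustomerSubscriber reuses its topicCache (the function and names are illustrative, not part of the framework):
func readBatch(mq *MemoryQueue, lastSeq uint64, buf []TopicData) ([]TopicData, bool) {
//pass buf[:0] so FindData appends into the existing backing array without allocating
data, inMemory := mq.FindData(lastSeq+1, 1000, buf[:0])
if !inMemory {
//fall back to the persistence layer, as CustomerSubscriber.subscribe does
return nil, false
}
return data, true
}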

View File

@@ -0,0 +1,36 @@
package messagequeueservice
import (
"fmt"
"testing"
)
type In int
func (i In) GetValue() int {
return int(i)
}
func Test_BiSearch(t *testing.T) {
var memQueue MemoryQueue
memQueue.Init(5)
for i := 1; i <= 8; i++ {
memQueue.Push(&TopicData{Seq: uint64(i)})
}
startindex := uint64(0)
for {
retData, ret := memQueue.FindData(startindex+1, 10, nil)
fmt.Println(retData, ret)
for _, d := range retData {
if d.Seq > startindex {
startindex = d.Seq
}
}
if ret == false {
break
}
}
}

View File

@@ -0,0 +1,126 @@
package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/rpc"
"sync"
)
type QueueDataPersist interface {
service.IModule
OnExit()
OnReceiveTopicData(topic string, topicData []TopicData) //called when pushed data arrives
OnPushTopicDataToCustomer(topic string, topicData []TopicData) //called back when data is pushed to a customer
PersistTopicData(topic string, topicData []TopicData, retryCount int) ([]TopicData, []TopicData, bool) //persists data; return false on failure and the caller retries until success; it is recommended to count retries inside and return true once the limit is exceeded
FindTopicData(topic string, startIndex uint64, limit int64, topicBuff []TopicData) []TopicData //looks up persisted data, appending the found rows to topicBuff
LoadCustomerIndex(topic string, customerId string) (uint64, bool) //false means the read failed (usually an I/O error) and will be retried; returns (0, true) when no record exists
GetIndex(topicData *TopicData) uint64 //derives the progress index from a topic record
PersistIndex(topic string, customerId string, index uint64) //persists the progress index
}
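To make the contract concrete, here is a minimal, illustration-only QueueDataPersist kept entirely in process memory. It is a sketch under the assumption that embedding service.Module satisfies service.IModule, as the MongoPersist implementation later in this diff does; none of these names are part of the framework.
type memoryPersist struct {
service.Module
mu sync.Mutex
topics map[string][]TopicData //persisted data per topic
indexes map[string]uint64 //customer progress, keyed by topic+"/"+customerId
}
func (p *memoryPersist) OnInit() error {
p.topics = map[string][]TopicData{}
p.indexes = map[string]uint64{}
return nil
}
func (p *memoryPersist) OnExit() {}
func (p *memoryPersist) OnReceiveTopicData(topic string, topicData []TopicData) {}
func (p *memoryPersist) OnPushTopicDataToCustomer(topic string, topicData []TopicData) {}
//stores the whole batch at once, so it never leaves a remainder and never fails
func (p *memoryPersist) PersistTopicData(topic string, topicData []TopicData, retryCount int) ([]TopicData, []TopicData, bool) {
p.mu.Lock()
defer p.mu.Unlock()
p.topics[topic] = append(p.topics[topic], topicData...)
return nil, topicData, true
}
func (p *memoryPersist) FindTopicData(topic string, startIndex uint64, limit int64, topicBuff []TopicData) []TopicData {
p.mu.Lock()
defer p.mu.Unlock()
for _, d := range p.topics[topic] {
if d.Seq > startIndex && int64(len(topicBuff)) < limit {
topicBuff = append(topicBuff, d)
}
}
return topicBuff
}
func (p *memoryPersist) LoadCustomerIndex(topic string, customerId string) (uint64, bool) {
p.mu.Lock()
defer p.mu.Unlock()
return p.indexes[topic+"/"+customerId], true
}
func (p *memoryPersist) GetIndex(topicData *TopicData) uint64 { return topicData.Seq }
func (p *memoryPersist) PersistIndex(topic string, customerId string, index uint64) {
p.mu.Lock()
p.indexes[topic+"/"+customerId] = index
p.mu.Unlock()
}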
type MessageQueueService struct {
service.Service
sync.Mutex
mapTopicRoom map[string]*TopicRoom
queueWait sync.WaitGroup
dataPersist QueueDataPersist
memoryQueueLen int32
maxProcessTopicBacklogNum int32 //maximum backlog of pending data (written to a channel and drained by a goroutine for persistence); defaults to 100000 when not configured
}
func (ms *MessageQueueService) OnInit() error {
ms.mapTopicRoom = map[string]*TopicRoom{}
errC := ms.ReadCfg()
if errC != nil {
return errC
}
if ms.dataPersist == nil {
return errors.New("not setup QueueDataPersist.")
}
_, err := ms.AddModule(ms.dataPersist)
if err != nil {
return err
}
return nil
}
func (ms *MessageQueueService) ReadCfg() error {
mapDBServiceCfg, ok := ms.GetService().GetServiceCfg().(map[string]interface{})
if ok == false {
return fmt.Errorf("MessageQueueService config is error")
}
maxProcessTopicBacklogNum, ok := mapDBServiceCfg["MaxProcessTopicBacklogNum"]
if ok == false {
ms.maxProcessTopicBacklogNum = DefaultMaxTopicBacklogNum
log.Info("MaxProcessTopicBacklogNum config is set to the default value of ", DefaultMaxTopicBacklogNum)
} else {
ms.maxProcessTopicBacklogNum = int32(maxProcessTopicBacklogNum.(float64))
}
memoryQueueLen, ok := mapDBServiceCfg["MemoryQueueLen"]
if ok == false {
ms.memoryQueueLen = DefaultMemoryQueueLen
log.Info("MemoryQueueLen config is set to the default value of ", DefaultMemoryQueueLen)
} else {
ms.memoryQueueLen = int32(memoryQueueLen.(float64))
}
return nil
}
func (ms *MessageQueueService) Setup(dataPersist QueueDataPersist) {
ms.dataPersist = dataPersist
}
func (ms *MessageQueueService) OnRelease() {
//stop all TopicRoom instances
ms.Lock()
for _, room := range ms.mapTopicRoom {
room.Stop()
}
ms.Unlock()
//wait for all goroutines to exit before releasing
ms.queueWait.Wait()
//notify the persistence object
ms.dataPersist.OnExit()
}
func (ms *MessageQueueService) GetTopicRoom(topic string) *TopicRoom {
ms.Lock()
defer ms.Unlock()
topicRoom := ms.mapTopicRoom[topic]
if topicRoom != nil {
return topicRoom
}
topicRoom = &TopicRoom{}
topicRoom.Init(ms.maxProcessTopicBacklogNum, ms.memoryQueueLen, topic, &ms.queueWait, ms.dataPersist)
ms.mapTopicRoom[topic] = topicRoom
return topicRoom
}
func (ms *MessageQueueService) RPC_Publish(inParam *rpc.DBQueuePublishReq, outParam *rpc.DBQueuePublishRes) error {
topicRoom := ms.GetTopicRoom(inParam.TopicName)
return topicRoom.Publish(inParam.PushData)
}
func (ms *MessageQueueService) RPC_Subscribe(req *rpc.DBQueueSubscribeReq, res *rpc.DBQueueSubscribeRes) error {
topicRoom := ms.GetTopicRoom(req.TopicName)
return topicRoom.TopicSubscribe(ms.GetRpcHandler(), req.SubType, int32(req.Method), req.FromNodeId, req.RpcMethod, req.TopicName, req.CustomerId, req.StartIndex, req.OneBatchQuantity)
}
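A wiring sketch for the two RPC entry points above: the application attaches a persistence backend before the node starts (the exact node/service registration call is application-specific and not shown in this diff; the helper name is hypothetical):
func setupMessageQueue() *MessageQueueService {
ms := &MessageQueueService{}
ms.Setup(&MongoPersist{}) //must be set before OnInit, which rejects a nil QueueDataPersist
return ms
}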

View File

@@ -0,0 +1,425 @@
package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/sysmodule/mongodbmodule"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"time"
)
const MaxDays = 180
type DataType interface {
int | uint | int64 | uint64 | float32 | float64 | int32 | uint32 | int16 | uint16
}
func convertToNumber[DType DataType](val interface{}) (error, DType) {
switch val.(type) {
case int64:
return nil, DType(val.(int64))
case int:
return nil, DType(val.(int))
case uint:
return nil, DType(val.(uint))
case uint64:
return nil, DType(val.(uint64))
case float32:
return nil, DType(val.(float32))
case float64:
return nil, DType(val.(float64))
case int32:
return nil, DType(val.(int32))
case uint32:
return nil, DType(val.(uint32))
case int16:
return nil, DType(val.(int16))
case uint16:
return nil, DType(val.(uint16))
}
return errors.New("unsupported type"), 0
}
type MongoPersist struct {
service.Module
mongo mongodbmodule.MongoModule
url string //connection url
dbName string //database name
retryCount int //number of retries when persisting to the database
}
const CustomerCollectName = "SysCustomer"
func (mp *MongoPersist) OnInit() error {
if errC := mp.ReadCfg(); errC != nil {
return errC
}
err := mp.mongo.Init(mp.url, time.Second*15)
if err != nil {
return err
}
err = mp.mongo.Start()
if err != nil {
log.SError("start dbService[", mp.dbName, "], url[", mp.url, "] init error:", err.Error())
return err
}
//create indexes
var IndexKey [][]string
var keys []string
keys = append(keys, "Customer", "Topic")
IndexKey = append(IndexKey, keys)
s := mp.mongo.TakeSession()
if err := s.EnsureUniqueIndex(mp.dbName, CustomerCollectName, IndexKey, true, true, true); err != nil {
log.SError("EnsureUniqueIndex is fail ", err.Error())
return err
}
return nil
}
func (mp *MongoPersist) ReadCfg() error {
mapDBServiceCfg, ok := mp.GetService().GetServiceCfg().(map[string]interface{})
if ok == false {
return fmt.Errorf("MessageQueueService config is error")
}
//parse the mongodb url
url, ok := mapDBServiceCfg["Url"]
if ok == false {
return fmt.Errorf("MessageQueueService config is error")
}
mp.url = url.(string)
dbName, ok := mapDBServiceCfg["DBName"]
if ok == false {
return fmt.Errorf("MessageQueueService config is error")
}
mp.dbName = dbName.(string)
//
goroutineNum, ok := mapDBServiceCfg["RetryCount"]
if ok == false {
return fmt.Errorf("MongoPersist config is error")
}
mp.retryCount = int(goroutineNum.(float64))
return nil
}
func (mp *MongoPersist) OnExit() {
}
// OnReceiveTopicData is called when pushed data arrives
func (mp *MongoPersist) OnReceiveTopicData(topic string, topicData []TopicData) {
//1. For each pushed record, insert an _id field into the document
for i := 0; i < len(topicData); i++ {
var document bson.D
err := bson.Unmarshal(topicData[i].RawData, &document)
if err != nil {
topicData[i].RawData = nil
log.SError(topic, " data Unmarshal is fail ", err.Error())
continue
}
document = append(document, bson.E{Key: "_id", Value: topicData[i].Seq})
byteRet, err := bson.Marshal(document)
if err != nil {
topicData[i].RawData = nil
log.SError(topic, " data Marshal is fail ", err.Error())
continue
}
topicData[i].ExtendParam = document
topicData[i].RawData = byteRet
}
}
// OnPushTopicDataToCustomer is called back when data is pushed to a customer
func (mp *MongoPersist) OnPushTopicDataToCustomer(topic string, topicData []TopicData) {
}
// persistTopicData persists one batch into the given collection
func (mp *MongoPersist) persistTopicData(collectionName string, topicData []TopicData, retryCount int) bool {
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
var documents []interface{}
for _, tData := range topicData {
if tData.ExtendParam == nil {
continue
}
documents = append(documents, tData.ExtendParam)
}
_, err := s.Collection(mp.dbName, collectionName).InsertMany(ctx, documents)
if err != nil {
log.SError("PersistTopicData InsertMany fail,collect name is ", collectionName," error:",err.Error())
//失败最大重试数量
return retryCount >= mp.retryCount
}
return true
}
func (mp *MongoPersist) IsSameDay(timestamp1 int64, timestamp2 int64) bool {
t1 := time.Unix(timestamp1, 0)
t2 := time.Unix(timestamp2, 0)
return t1.Year() == t2.Year() && t1.Month() == t2.Month() && t1.Day() == t2.Day()
}
// PersistTopicData persists data, splitting the batch at the first day boundary
func (mp *MongoPersist) PersistTopicData(topic string, topicData []TopicData, retryCount int) ([]TopicData, []TopicData, bool) {
if len(topicData) == 0 {
return nil, nil,true
}
preDate := topicData[0].Seq >> 32
var findPos int
for findPos = 1; findPos < len(topicData); findPos++ {
newDate := topicData[findPos].Seq >> 32
//the day has changed
if mp.IsSameDay(int64(preDate),int64(newDate)) == false {
break
}
}
collectName := fmt.Sprintf("%s_%s", topic, mp.GetDateByIndex(topicData[0].Seq))
ret := mp.persistTopicData(collectName, topicData[:findPos], retryCount)
//on failure, the caller will retry next time
if ret == false {
return nil, nil, false
}
//on success, return the remaining batch and the persisted batch
return topicData[findPos:], topicData[0:findPos], true
}
// findTopicData queries a single day's collection starting from startIndex
func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int64,topicBuff []TopicData) ([]TopicData, bool) {
s := mp.mongo.TakeSession()
condition := bson.D{{Key: "_id", Value: bson.D{{Key: "$gt", Value: startIndex}}}}
var findOption options.FindOptions
findOption.SetLimit(limit)
var findOptions []*options.FindOptions
findOptions = append(findOptions, &findOption)
ctx, cancel := s.GetDefaultContext()
defer cancel()
collectName := fmt.Sprintf("%s_%s", topic, mp.GetDateByIndex(startIndex))
cursor, err := s.Collection(mp.dbName, collectName).Find(ctx, condition, findOptions...)
if err != nil || cursor.Err() != nil {
if err == nil {
err = cursor.Err()
}
log.SError("find collect name ", topic, " is error:", err.Error())
return nil, false
}
var res []interface{}
ctxAll, cancelAll := s.GetDefaultContext()
defer cancelAll()
err = cursor.All(ctxAll, &res)
if err != nil {
log.SError("find collect name ", topic, " is error:", err.Error())
return nil, false
}
//marshal the results for returning
for i := 0; i < len(res); i++ {
rawData, errM := bson.Marshal(res[i])
if errM != nil {
log.SError("collect name ", topic, " Marshal is error:", errM.Error())
return nil, false
}
topicBuff = append(topicBuff, TopicData{RawData: rawData})
}
return topicBuff, true
}
func (mp *MongoPersist) IsYesterday(startIndex uint64) (bool, string) {
timeStamp := int64(startIndex >> 32)
startTime := time.Unix(timeStamp, 0).AddDate(0, 0, 1)
nowTm := time.Now()
return startTime.Year() == nowTm.Year() && startTime.Month() == nowTm.Month() && startTime.Day() == nowTm.Day(), nowTm.Format("20060102")
}
func (mp *MongoPersist) getCollectCount(topic string,today string) (int64 ,error){
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
collectName := fmt.Sprintf("%s_%s", topic, today)
count, err := s.Collection(mp.dbName, collectName).EstimatedDocumentCount(ctx)
return count,err
}
// FindTopicData looks up data, advancing day by day until data is found or the current date is reached
func (mp *MongoPersist) FindTopicData(topic string, startIndex uint64, limit int64,topicBuff []TopicData) []TopicData {
//if a day's collection has nothing, keep moving forward day by day until today is reached
for days := 1; days <= MaxDays; days++ {
//can we skip to the next day?
//at a day boundary, records may not have finished inserting in another goroutine; jumping straight to the next day because nothing was found would lose data
//the fix: at the day boundary, first check whether the new day already has records; only then can yesterday's data be considered fully inserted and the query skip ahead
IsJumpDays := true
//if startIndex falls on yesterday, first check whether today's collection has data
bYesterday, strToday := mp.IsYesterday(startIndex)
if bYesterday {
count, err := mp.getCollectCount(topic, strToday)
if err != nil {
//on failure, start over
log.SError("getCollectCount ", topic, "_", strToday, " is fail:", err.Error())
return nil
}
//no records for today yet, so do not skip days: more data may still arrive for that day
if count == 0 {
IsJumpDays = false
}
}
//query forward starting from startIndex
topicData, isSucc := mp.findTopicData(topic, startIndex, limit, topicBuff)
//return when data is found or the database errors; the caller will start the next query round
if len(topicData) > 0 || isSucc == false {
return topicData
}
//no data found: check whether we have already reached the current date
if mp.GetDateByIndex(startIndex) >= mp.GetNowTime() {
break
}
//day skipping is not allowed, so stop here
if IsJumpDays == false {
break
}
startIndex = mp.GetNextIndex(startIndex, days)
}
return nil
}
func (mp *MongoPersist) GetNowTime() string {
now := time.Now()
zeroTime := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
return zeroTime.Format("20060102")
}
func (mp *MongoPersist) GetDateByIndex(startIndex uint64) string {
startTm := int64(startIndex >> 32)
return time.Unix(startTm, 0).Format("20060102")
}
func (mp *MongoPersist) GetNextIndex(startIndex uint64, addDay int) uint64 {
startTime := time.Unix(int64(startIndex>>32), 0)
dateTime := time.Date(startTime.Year(), startTime.Month(), startTime.Day(), 0, 0, 0, 0, startTime.Location())
newDateTime := dateTime.AddDate(0, 0, addDay)
nextIndex := uint64(newDateTime.Unix()) << 32
return nextIndex
}
// LoadCustomerIndex returns false on read failure (the caller retries); returns (0, true) when no record exists
func (mp *MongoPersist) LoadCustomerIndex(topic string, customerId string) (uint64, bool) {
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
condition := bson.D{{Key: "Customer", Value: customerId}, {Key: "Topic", Value: topic}}
cursor, err := s.Collection(mp.dbName, CustomerCollectName).Find(ctx, condition)
if err != nil {
log.SError("Load topic ", topic, " customer ", customerId, " is fail:", err.Error())
return 0, false
}
type findRes struct {
Index uint64 `bson:"Index,omitempty"`
}
var res []findRes
ctxAll, cancelAll := s.GetDefaultContext()
defer cancelAll()
err = cursor.All(ctxAll, &res)
if err != nil {
log.SError("Load topic ", topic, " customer ", customerId, " is fail:", err.Error())
return 0, false
}
if len(res) == 0 {
return 0, true
}
return res[0].Index, true
}
// GetIndex derives the progress index from a topic record
func (mp *MongoPersist) GetIndex(topicData *TopicData) uint64 {
if topicData.Seq > 0 {
return topicData.Seq
}
var document bson.D
err := bson.Unmarshal(topicData.RawData, &document)
if err != nil {
log.SError("GetIndex is fail ", err.Error())
return 0
}
for _, e := range document {
if e.Key == "_id" {
errC, seq := convertToNumber[uint64](e.Value)
if errC != nil {
log.Error("value is error:%s,%+v, ", errC.Error(), e.Value)
}
return seq
}
}
return topicData.Seq
}
// PersistIndex persists the progress index
func (mp *MongoPersist) PersistIndex(topic string, customerId string, index uint64) {
s := mp.mongo.TakeSession()
condition := bson.D{{Key: "Customer", Value: customerId}, {Key: "Topic", Value: topic}}
upsert := bson.M{"Customer": customerId, "Topic": topic, "Index": index}
updata := bson.M{"$set": upsert}
var UpdateOptionsOpts []*options.UpdateOptions
UpdateOptionsOpts = append(UpdateOptionsOpts, options.Update().SetUpsert(true))
ctx, cancel := s.GetDefaultContext()
defer cancel()
_, err := s.Collection(mp.dbName, CustomerCollectName).UpdateOne(ctx, condition, updata, UpdateOptionsOpts...)
if err != nil {
log.SError("PersistIndex fail :", err.Error())
}
}

View File

@@ -0,0 +1,122 @@
package messagequeueservice
import (
"fmt"
"go.mongodb.org/mongo-driver/bson"
"testing"
"time"
)
var seq uint64
var lastTime int64
func NextSeq(addDays int) uint64 {
now := time.Now().AddDate(0, 0, addDays)
nowSec := now.Unix()
if nowSec != lastTime {
seq = 0
lastTime = nowSec
}
//must start from 1 so that seq > 0 when querying
seq += 1
return uint64(nowSec)<<32 | uint64(seq)
}
func Test_MongoPersist(t *testing.T) {
//1. Initialization
var mongoPersist MongoPersist
mongoPersist.url = "mongodb://admin:123456@192.168.2.15:27017/?minPoolSize=5&maxPoolSize=35&maxIdleTimeMS=30000"
mongoPersist.dbName = "MongoPersistTest"
mongoPersist.retryCount = 10
mongoPersist.OnInit()
//2.
//load the customer index
index, ret := mongoPersist.LoadCustomerIndex("TestTopic", "TestCustomer")
fmt.Println(index, ret)
now := time.Now()
zeroTime := time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, now.Location())
//fmt.Println(zeroTime.Unix())
startIndex := uint64(zeroTime.Unix()<<32) | 1
//persist the index
mongoPersist.PersistIndex("TestTopic", "TestCustomer", startIndex)
//load the index again
index, ret = mongoPersist.LoadCustomerIndex("TestTopic", "TestCustomer")
type RowTest struct {
Name string `bson:"Name,omitempty"`
MapTest map[int]int `bson:"MapTest,omitempty"`
Message string `bson:"Message,omitempty"`
}
type RowTest2 struct {
Id uint64 `bson:"_id,omitempty"`
Name string `bson:"Name,omitempty"`
MapTest map[int]int `bson:"MapTest,omitempty"`
Message string `bson:"Message,omitempty"`
}
//write the test data
var findStartIndex uint64
var topicData []TopicData
for i := 1; i <= 1000; i++ {
var rowTest RowTest
rowTest.Name = fmt.Sprintf("Name_%d", i)
rowTest.MapTest = make(map[int]int, 1)
rowTest.MapTest[i] = i*1000 + i
rowTest.Message = fmt.Sprintf("xxxxxxxxxxxxxxxxxx%d", i)
byteRet, _ := bson.Marshal(rowTest)
var dataSeq uint64
if i <= 500 {
dataSeq = NextSeq(-1)
} else {
dataSeq = NextSeq(0)
}
topicData = append(topicData, TopicData{RawData: byteRet, Seq: dataSeq})
if i == 1 {
findStartIndex = topicData[0].Seq
}
}
mongoPersist.OnReceiveTopicData("TestTopic", topicData)
for {
if len(topicData) == 0 {
break
}
topicData, _, ret = mongoPersist.PersistTopicData("TestTopic", topicData, 1)
fmt.Println(ret)
}
//
for {
retTopicData := mongoPersist.FindTopicData("TestTopic", findStartIndex, 300, nil)
for i, data := range retTopicData {
var rowTest RowTest2
bson.Unmarshal(data.RawData, &rowTest)
t.Log(rowTest.Name)
if i == len(retTopicData)-1 {
findStartIndex = mongoPersist.GetIndex(&data)
}
}
t.Log("..................")
if len(retTopicData) == 0 {
break
}
}
//t.Log(mongoPersist.GetIndex(&retTopicData[0]))
//t.Log(mongoPersist.GetIndex(&retTopicData[len(retTopicData)-1]))
}

View File

@@ -0,0 +1,91 @@
package messagequeueservice
import (
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"sync"
)
// Subscriber manages all customers subscribed to one topic
type Subscriber struct {
customerLocker sync.RWMutex
mapCustomer map[string]*CustomerSubscriber
queue MemoryQueue
dataPersist QueueDataPersist //queue data persistence handler
queueWait *sync.WaitGroup
}
func (ss *Subscriber) Init(memoryQueueCap int32) {
ss.queue.Init(memoryQueueCap)
ss.mapCustomer = make(map[string]*CustomerSubscriber, 5)
}
func (ss *Subscriber) PushTopicDataToQueue(topic string, topics []TopicData) {
for i := 0; i < len(topics); i++ {
ss.queue.Push(&topics[i])
}
}
func (ss *Subscriber) PersistTopicData(topic string, topics []TopicData, retryCount int) ([]TopicData, []TopicData, bool) {
return ss.dataPersist.PersistTopicData(topic, topics, retryCount)
}
func (ss *Subscriber) TopicSubscribe(rpcHandler rpc.IRpcHandler, subScribeType rpc.SubscribeType, subscribeMethod SubscribeMethod, fromNodeId string, callBackRpcMethod string, topic string, customerId string, StartIndex uint64, oneBatchQuantity int32) error {
//unsubscribe request
if subScribeType == rpc.SubscribeType_Unsubscribe {
ss.UnSubscribe(customerId)
return nil
} else {
ss.customerLocker.Lock()
customerSubscriber, ok := ss.mapCustomer[customerId]
if ok == true {
//already subscribed: cancel the existing subscription first
customerSubscriber.UnSubscribe()
delete(ss.mapCustomer, customerId)
}
//then register the new subscription
customerSubscriber = &CustomerSubscriber{}
ss.mapCustomer[customerId] = customerSubscriber
ss.customerLocker.Unlock()
err := customerSubscriber.Subscribe(rpcHandler, ss, topic, subscribeMethod, customerId, fromNodeId, callBackRpcMethod, StartIndex, oneBatchQuantity)
if err != nil {
return err
}
if ok == true {
log.Info("repeat subscription for customer ", customerId)
} else {
log.Info("subscription for customer ", customerId)
}
}
return nil
}
func (ss *Subscriber) UnSubscribe(customerId string) {
ss.customerLocker.RLock()
defer ss.customerLocker.RUnlock()
customerSubscriber, ok := ss.mapCustomer[customerId]
if ok == false {
log.SWarning("failed to unsubscribe customer " + customerId)
return
}
customerSubscriber.UnSubscribe()
}
func (ss *Subscriber) removeCustomer(customerId string, cs *CustomerSubscriber) {
ss.customerLocker.Lock()
//make sure we only delete the current relation; the customer may have been replaced by a re-subscription
customer, _ := ss.mapCustomer[customerId]
if customer == cs {
delete(ss.mapCustomer, customerId)
}
ss.customerLocker.Unlock()
}

View File

@@ -0,0 +1,149 @@
package messagequeueservice
import (
"errors"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/coroutine"
"sync"
"sync/atomic"
"time"
)
type TopicData struct {
Seq uint64 //sequence number
RawData []byte //raw payload
ExtendParam interface{} //extension parameter
}
func (t TopicData) GetValue() uint64 {
return t.Seq
}
var topicFullError = errors.New("topic room is full")
const DefaultOnceProcessTopicDataNum = 1024 //number of topic records processed per pass (also the batch size for persistence)
const DefaultMaxTopicBacklogNum = 100000 //maximum channel backlog
const DefaultMemoryQueueLen = 50000 //maximum length of the in-memory queue
const maxTryPersistNum = 3000 //maximum retries, roughly > 5 minutes
type TopicRoom struct {
topic string //topic name
channelTopic chan TopicData //data pushed to this topic awaiting processing
Subscriber //subscription manager
//sequence generation
seq uint32
lastTime int64
//onceProcessTopicDataNum int //maximum amount of subscription data handled per pass, so that Subscriber and QueueDataProcessor can work in batches
StagingBuff []TopicData
isStop int32
}
// maxTopicBacklogNum: maximum backlog per topic
func (tr *TopicRoom) Init(maxTopicBacklogNum int32, memoryQueueLen int32, topic string, queueWait *sync.WaitGroup, dataPersist QueueDataPersist) {
if maxTopicBacklogNum == 0 {
maxTopicBacklogNum = DefaultMaxTopicBacklogNum
}
tr.channelTopic = make(chan TopicData, maxTopicBacklogNum)
tr.topic = topic
tr.dataPersist = dataPersist
tr.queueWait = queueWait
tr.StagingBuff = make([]TopicData, DefaultOnceProcessTopicDataNum)
tr.queueWait.Add(1)
tr.Subscriber.Init(memoryQueueLen)
coroutine.GoRecover(tr.topicRoomRun, -1)
}
func (tr *TopicRoom) Publish(data [][]byte) error {
if len(tr.channelTopic)+len(data) > cap(tr.channelTopic) {
return topicFullError
}
//generate ordered sequence numbers for the batch
for _, rawData := range data {
tr.channelTopic <- TopicData{RawData: rawData, Seq: tr.NextSeq()}
}
return nil
}
func (tr *TopicRoom) NextSeq() uint64 {
now := time.Now()
nowSec := now.Unix()
if nowSec != tr.lastTime {
tr.seq = 0
tr.lastTime = nowSec
}
//must start from 1 so that seq > 0 when querying
tr.seq += 1
return uint64(nowSec)<<32 | uint64(tr.seq)
}
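NextSeq packs the current unix second into the high 32 bits and a per-second counter into the low 32 bits, which keeps published data ordered and lets the MongoPersist layer recover the date from an index. For example, the third record published during unix second 1700000000 gets Seq 1700000000<<32 | 3. A small illustration-only helper (not part of the framework) makes the layout explicit:
//decodeSeq splits a Seq produced by NextSeq back into its parts; the
//framework never needs this, it only documents the bit layout.
func decodeSeq(seq uint64) (unixSec int64, counter uint32) {
return int64(seq >> 32), uint32(seq & 0xFFFFFFFF)
}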
func (tr *TopicRoom) Stop() {
atomic.StoreInt32(&tr.isStop, 1)
}
func (tr *TopicRoom) topicRoomRun() {
defer tr.queueWait.Done()
log.Info("topic room ", tr.topic, " is running..")
for {
if atomic.LoadInt32(&tr.isStop) != 0 {
break
}
stagingBuff := tr.StagingBuff[:0]
for i := 0; i < len(tr.channelTopic) && i < DefaultOnceProcessTopicDataNum; i++ {
topicData := <-tr.channelTopic
stagingBuff = append(stagingBuff, topicData)
}
tr.Subscriber.dataPersist.OnReceiveTopicData(tr.topic, stagingBuff)
//persist the batch and then place it into memory
if len(stagingBuff) == 0 {
time.Sleep(time.Millisecond * 50)
continue
}
//if persisting fails, retry at most maxTryPersistNum times
for retryCount := 0; retryCount < maxTryPersistNum; {
//persist one batch; plain assignment (not :=) so the remaining batch carries over to the next iteration instead of being shadowed
var savedBuff []TopicData
var ret bool
stagingBuff, savedBuff, ret = tr.PersistTopicData(tr.topic, stagingBuff, retryCount+1)
if ret == true {
// 1. put the successfully stored data into the in-memory queue
if len(savedBuff) > 0 {
tr.PushTopicDataToQueue(tr.topic, savedBuff)
}
// 2. this batch succeeded; if there are follow-up batches, keep persisting
if len(stagingBuff) > 0 {
continue
}
// 3. all done, break out
break
} else {
//bump the retry count and wait 100ms before retrying
retryCount++
time.Sleep(time.Millisecond * 100)
}
}
}
//cancel all subscriptions
tr.customerLocker.Lock()
for _, customer := range tr.mapCustomer {
customer.UnSubscribe()
}
tr.customerLocker.Unlock()
log.Info("topic room ", tr.topic, " is stop")
}

View File

@@ -0,0 +1,429 @@
package rankservice
import (
"fmt"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/sysmodule/mongodbmodule"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"runtime"
"sync"
"sync/atomic"
"time"
)
const batchRemoveNum = 128 //maximum number of entries removed per batch
// RankDataDB is a row of the rank table
type RankDataDB struct {
Id uint64 `bson:"_id"`
RefreshTime int64 `bson:"RefreshTime"`
SortData []int64 `bson:"SortData"`
Data []byte `bson:"Data"`
ExData []int64 `bson:"ExData"`
}
// MongoPersist is the persistence module
type MongoPersist struct {
service.Module
mongo mongodbmodule.MongoModule
url string //MongoDB connection url
dbName string //database name
SaveInterval time.Duration //interval between database flushes
sync.Mutex
mapRemoveRankData map[uint64]map[uint64]struct{} //rank data pending removal: map[RankId]map[Key]struct{}
mapUpsertRankData map[uint64]map[uint64]RankData //rank data pending upsert: map[RankId][Key]RankData
mapRankSkip map[uint64]IRankSkip //all rank objects: map[RankId]IRankSkip
maxRetrySaveCount int //number of save retries
retryTimeIntervalMs time.Duration //retry interval
lastSaveTime time.Time //time of the last save
stop int32 //whether the server is stopping
waitGroup sync.WaitGroup //wait for shutdown
}
func (mp *MongoPersist) OnInit() error {
mp.mapRemoveRankData = map[uint64]map[uint64]struct{}{}
mp.mapUpsertRankData = map[uint64]map[uint64]RankData{}
mp.mapRankSkip = map[uint64]IRankSkip{}
if errC := mp.ReadCfg(); errC != nil {
return errC
}
//initialize MongoDB
err := mp.mongo.Init(mp.url, time.Second*15)
if err != nil {
return err
}
//start running
err = mp.mongo.Start()
if err != nil {
log.SError("start dbService[", mp.dbName, "], url[", mp.url, "] init error:", err.Error())
return err
}
//start the persistence goroutine
mp.waitGroup.Add(1)
go mp.persistCoroutine()
return nil
}
func (mp *MongoPersist) ReadCfg() error {
mapDBServiceCfg, ok := mp.GetService().GetServiceCfg().(map[string]interface{})
if ok == false {
return fmt.Errorf("RankService config is error")
}
//read the database configuration
saveMongoCfg, ok := mapDBServiceCfg["SaveMongo"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo config is error")
}
mongodbCfg,ok := saveMongoCfg.(map[string]interface{})
if ok == false {
return fmt.Errorf("RankService.SaveMongo config is error")
}
url, ok := mongodbCfg["Url"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo.Url config is error")
}
mp.url = url.(string)
dbName, ok := mongodbCfg["DBName"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo.DBName config is error")
}
mp.dbName = dbName.(string)
saveInterval, ok := mongodbCfg["SaveIntervalMs"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo.SaveIntervalMs config is error")
}
mp.SaveInterval = time.Duration(saveInterval.(float64))*time.Millisecond
maxRetrySaveCount, ok := mongodbCfg["MaxRetrySaveCount"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo.MaxRetrySaveCount config is error")
}
mp.maxRetrySaveCount = int(maxRetrySaveCount.(float64))
retryTimeIntervalMs, ok := mongodbCfg["RetryTimeIntervalMs"]
if ok == false {
return fmt.Errorf("RankService.SaveMongo.RetryTimeIntervalMs config is error")
}
mp.retryTimeIntervalMs = time.Duration(retryTimeIntervalMs.(float64))*time.Millisecond
return nil
}
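// For reference, a SaveMongo block of roughly the following shape satisfies ReadCfg.
// Only the five field names come from the code above; the surrounding layout and the
// sample values are assumptions:
//
//	"RankService": {
//		"SaveMongo": {
//			"Url": "mongodb://127.0.0.1:27017",
//			"DBName": "rankdb",
//			"SaveIntervalMs": 5000,
//			"MaxRetrySaveCount": 3,
//			"RetryTimeIntervalMs": 200
//		}
//	}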
//load from the database when the service starts
func (mp *MongoPersist) OnStart() {
}
func (mp *MongoPersist) OnSetupRank(manual bool, rankSkip *RankSkip) error {
if mp.mapRankSkip == nil {
mp.mapRankSkip = map[uint64]IRankSkip{}
}
mp.mapRankSkip[rankSkip.GetRankID()] = rankSkip
if manual {
return nil
}
log.Info("start load rank ", rankSkip.GetRankName(), " from mongodb.")
err := mp.loadFromDB(rankSkip.GetRankID(), rankSkip.GetRankName())
if err != nil {
log.SError("load from db failed: ", err.Error())
return err
}
log.Info("finish load rank ", rankSkip.GetRankName(), " from mongodb.")
return nil
}
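// loadFromDB reads every document of the rank collection and feeds it back into the in-memory rank via UpsetRank.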
func (mp *MongoPersist) loadFromDB(rankId uint64, rankCollectName string) error {
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
condition := bson.D{}
cursor, err := s.Collection(mp.dbName, rankCollectName).Find(ctx, condition)
if err != nil {
log.SError("find collect name ", rankCollectName, " is error:", err.Error())
return err
}
if cursor.Err() != nil {
log.SError("find collect name ", rankCollectName, " is error:", cursor.Err().Error())
return cursor.Err()
}
rankSkip := mp.mapRankSkip[rankId]
if rankSkip == nil {
err = fmt.Errorf("rank %s is not setup", rankCollectName)
log.SError(err.Error())
return err
}
defer cursor.Close(ctx)
for cursor.Next(ctx) {
var rankDataDB RankDataDB
err = cursor.Decode(&rankDataDB)
if err != nil {
log.SError(" collect name ", rankCollectName, " Decode is error:", err.Error())
return err
}
var rankData rpc.RankData
rankData.Data = rankDataDB.Data
rankData.Key = rankDataDB.Id
rankData.SortData = rankDataDB.SortData
for _, eData := range rankDataDB.ExData {
rankData.ExData = append(rankData.ExData, &rpc.ExtendIncData{InitValue: eData})
}
//apply the loaded record to the rank
rankSkip.UpsetRank(&rankData, rankDataDB.RefreshTime, true)
}
return nil
}
func (mp *MongoPersist) lazyInitRemoveMap(rankId uint64){
if mp.mapRemoveRankData[rankId] == nil {
mp.mapRemoveRankData[rankId] = make(map[uint64]struct{},256)
}
}
func (mp *MongoPersist) lazyInitUpsertMap(rankId uint64){
if mp.mapUpsertRankData[rankId] == nil {
mp.mapUpsertRankData[rankId] = make(map[uint64]RankData,256)
}
}
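// The three callbacks below only buffer changes under the module lock; the actual database writes happen later in the persistence goroutine.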
func (mp *MongoPersist) OnEnterRank(rankSkip IRankSkip, enterData *RankData) {
mp.Lock()
defer mp.Unlock()
//drop any pending removal of this key, then buffer the upsert
delete(mp.mapRemoveRankData[rankSkip.GetRankID()], enterData.Key)
mp.lazyInitUpsertMap(rankSkip.GetRankID())
mp.mapUpsertRankData[rankSkip.GetRankID()][enterData.Key] = *enterData
}
func (mp *MongoPersist) OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) {
mp.Lock()
defer mp.Unlock()
//drop any pending upsert of this key first
delete(mp.mapUpsertRankData[rankSkip.GetRankID()], leaveData.Key)
mp.lazyInitRemoveMap(rankSkip.GetRankID())
mp.mapRemoveRankData[rankSkip.GetRankID()][leaveData.Key] = struct{}{}
}
func (mp *MongoPersist) OnChangeRankData(rankSkip IRankSkip, changeData *RankData) {
mp.Lock()
defer mp.Unlock()
//drop any pending removal of this key first
delete(mp.mapRemoveRankData[rankSkip.GetRankID()], changeData.Key)
//buffer the updated data
mp.lazyInitUpsertMap(rankSkip.GetRankID())
mp.mapUpsertRankData[rankSkip.GetRankID()][changeData.Key] = *changeData
}
//on stop, signal the persistence goroutine and wait for its final flush to the DB
func (mp *MongoPersist) OnStop(mapRankSkip map[uint64]*RankSkip){
atomic.StoreInt32(&mp.stop,1)
mp.waitGroup.Wait()
}
func (mp *MongoPersist) JugeTimeoutSave() bool {
now := time.Now()
isTimeOut := now.Sub(mp.lastSaveTime) >= mp.SaveInterval
if isTimeOut {
mp.lastSaveTime = now
}
return isTimeOut
}
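// persistCoroutine wakes up every second and flushes the buffered changes once SaveInterval has elapsed; it performs one final flush before exiting on shutdown.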
func (mp *MongoPersist) persistCoroutine() {
defer mp.waitGroup.Done()
for atomic.LoadInt32(&mp.stop) == 0 {
//sleep between checks
time.Sleep(time.Second * 1)
//nothing to persist, keep waiting
if !mp.hasPersistData() {
continue
}
//save interval not reached yet
if !mp.JugeTimeoutSave() {
continue
}
//flush buffered data to the database
mp.saveToDB()
}
//flush one last time on exit
mp.saveToDB()
}
func (mp *MongoPersist) hasPersistData() bool{
mp.Lock()
defer mp.Unlock()
return len(mp.mapUpsertRankData)>0 || len(mp.mapRemoveRankData) >0
}
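// saveToDB swaps the pending upsert/remove maps out under the lock, then writes them to MongoDB without holding the lock; a recover guard logs any panic together with a stack dump.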
func (mp *MongoPersist) saveToDB(){
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
//1. swap out the pending data under the lock
mp.Lock()
mapRemoveRankData := mp.mapRemoveRankData
mapUpsertRankData := mp.mapUpsertRankData
mp.mapRemoveRankData = map[uint64]map[uint64]struct{}{}
mp.mapUpsertRankData = map[uint64]map[uint64]RankData{}
mp.Unlock()
//2. flush to the database
for len(mapUpsertRankData) > 0 {
mp.upsertRankDataToDB(mapUpsertRankData)
}
for len(mapRemoveRankData) > 0 {
mp.removeRankDataToDB(mapRemoveRankData)
}
}
func (mp *MongoPersist) removeToDB(collectName string,keys []uint64) error{
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
condition := bson.D{{Key: "_id", Value: bson.M{"$in": keys}}}
_, err := s.Collection(mp.dbName, collectName).DeleteMany(ctx, condition)
if err != nil {
log.SError("MongoPersist DeleteMany fail,collect name is ", collectName)
return err
}
return nil
}
func (mp *MongoPersist) removeRankData(rankId uint64, keys []uint64) bool {
rank := mp.mapRankSkip[rankId]
if rank == nil {
log.SError("cannot find rankId ", rankId, " config")
return false
}
//retry up to maxRetrySaveCount times on failure
for i := 0; i < mp.maxRetrySaveCount; i++ {
if mp.removeToDB(rank.GetRankName(), keys) != nil {
time.Sleep(mp.retryTimeIntervalMs)
continue
}
break
}
return true
}
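// upsertToDB writes a single rank entry with upsert semantics, keyed by RankData.Key.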
func (mp *MongoPersist) upsertToDB(collectName string, rankData *RankData) error {
condition := bson.D{{Key: "_id", Value: rankData.Key}}
upsert := bson.M{"_id": rankData.Key, "RefreshTime": rankData.RefreshTimestamp, "SortData": rankData.SortData, "Data": rankData.Data, "ExData": rankData.ExData}
update := bson.M{"$set": upsert}
s := mp.mongo.TakeSession()
ctx, cancel := s.GetDefaultContext()
defer cancel()
updateOpts := options.Update().SetUpsert(true)
_, err := s.Collection(mp.dbName, collectName).UpdateOne(ctx, condition,update,updateOpts)
if err != nil {
log.SError("MongoPersist upsertDB fail,collect name is ", collectName)
return err
}
return nil
}
func (mp *MongoPersist) upsertRankDataToDB(mapUpsertRankData map[uint64]map[uint64]RankData) error {
for rankId, mapRankData := range mapUpsertRankData {
rank, ok := mp.mapRankSkip[rankId]
if !ok {
log.SError("cannot find rankId ", rankId, ", config is invalid")
delete(mapUpsertRankData, rankId)
continue
}
for key, rankData := range mapRankData {
//retry at most mp.maxRetrySaveCount times
for i := 0; i < mp.maxRetrySaveCount; i++ {
err := mp.upsertToDB(rank.GetRankName(), &rankData)
if err != nil {
time.Sleep(mp.retryTimeIntervalMs)
continue
}
break
}
//remove the key once it has been written
delete(mapRankData, key)
}
if len(mapRankData) == 0 {
delete(mapUpsertRankData, rankId)
}
}
return nil
}
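// removeRankDataToDB drains the pending-removal map, deleting at most batchRemoveNum keys per DeleteMany call.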
func (mp *MongoPersist) removeRankDataToDB(mapRemoveRankData map[uint64]map[uint64]struct{}) {
for rankId, mapRemoveKey := range mapRemoveRankData {
//remove at most batchRemoveNum keys per batch
keyList := make([]uint64, 0, batchRemoveNum)
for key := range mapRemoveKey {
delete(mapRemoveKey, key)
keyList = append(keyList, key)
if len(keyList) >= batchRemoveNum {
break
}
}
mp.removeRankData(rankId, keyList)
//once all keys of this rankId are flushed, drop the rankId entry
if len(mapRemoveKey) == 0 {
delete(mapRemoveRankData, rankId)
}
}
}

@@ -0,0 +1,109 @@
package rankservice
import (
"github.com/duanhf2012/origin/v2/rpc"
"github.com/duanhf2012/origin/v2/util/algorithms/skip"
"github.com/duanhf2012/origin/v2/util/sync"
)
var emptyRankData RankData
var RankDataPool = sync.NewPoolEx(make(chan sync.IPoolData, 10240), func() sync.IPoolData {
var newRankData RankData
return &newRankData
})
type RankData struct {
Key uint64
SortData []int64
Data []byte
ExData []int64
RefreshTimestamp int64 //refresh timestamp
//bRelease bool
ref bool
compareFunc func(other skip.Comparator) int
}
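// NewRankData takes a RankData from the pool, fills it from the rpc message and selects the ascending or descending comparator.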
func NewRankData(isDec bool, data *rpc.RankData,refreshTimestamp int64) *RankData {
ret := RankDataPool.Get().(*RankData)
ret.compareFunc = ret.ascCompare
if isDec {
ret.compareFunc = ret.desCompare
}
ret.Key = data.Key
ret.SortData = data.SortData
ret.Data = data.Data
for _,d := range data.ExData{
ret.ExData = append(ret.ExData,d.InitValue+d.IncreaseValue)
}
ret.RefreshTimestamp = refreshTimestamp
return ret
}
func ReleaseRankData(rankData *RankData) {
RankDataPool.Put(rankData)
}
func (p *RankData) Reset() {
*p = emptyRankData
}
func (p *RankData) IsRef() bool {
return p.ref
}
func (p *RankData) Ref() {
p.ref = true
}
func (p *RankData) UnRef() {
p.ref = false
}
func (p *RankData) Compare(other skip.Comparator) int {
return p.compareFunc(other)
}
func (p *RankData) GetKey() uint64 {
return p.Key
}
func (p *RankData) ascCompare(other skip.Comparator) int {
otherRankData := other.(*RankData)
if otherRankData.Key == p.Key {
return 0
}
retFlg := compareMoreThan(p.SortData, otherRankData.SortData)
if retFlg == 0 {
if p.Key > otherRankData.Key {
retFlg = 1
} else {
retFlg = -1
}
}
return retFlg
}
func (p *RankData) desCompare(other skip.Comparator) int {
otherRankData := other.(*RankData)
if otherRankData.Key == p.Key {
return 0
}
retFlg := compareMoreThan(otherRankData.SortData, p.SortData)
if retFlg == 0 {
if p.Key > otherRankData.Key {
retFlg = -1
} else {
retFlg = 1
}
}
return retFlg
}
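// A minimal usage sketch of the pool round-trip above. The helper below is illustrative
// only (it is not part of this change) and assumes the file also imports "time":
//
//	func exampleRankDataRoundTrip(in *rpc.RankData) {
//		rd := NewRankData(true, in, time.Now().UnixNano()) // descending rank
//		// ... insert rd into the skip list; once the entry leaves the rank:
//		ReleaseRankData(rd)
//	}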

@@ -0,0 +1,125 @@
package rankservice
import (
"container/heap"
"github.com/duanhf2012/origin/v2/util/sync"
"time"
)
var expireDataPool = sync.NewPoolEx(make(chan sync.IPoolData, 10240), func() sync.IPoolData {
return &ExpireData{}
})
type ExpireData struct {
Index int
Key uint64
RefreshTimestamp int64
ref bool
}
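// rankDataHeap is a min-heap of ExpireData ordered by RefreshTimestamp; mapExpireData indexes the entries by key so they can be refreshed or removed in place.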
type rankDataHeap struct {
rankDatas []*ExpireData
expireMs int64
mapExpireData map[uint64]*ExpireData
}
var expireData ExpireData
func (ed *ExpireData) Reset(){
*ed = expireData
}
func (ed *ExpireData) IsRef() bool{
return ed.ref
}
func (ed *ExpireData) Ref(){
ed.ref = true
}
func (ed *ExpireData) UnRef(){
ed.ref = false
}
func (rd *rankDataHeap) Init(maxRankDataCount int32,expireMs time.Duration){
rd.rankDatas = make([]*ExpireData,0,maxRankDataCount)
rd.expireMs = int64(expireMs)
rd.mapExpireData = make(map[uint64]*ExpireData,512)
heap.Init(rd)
}
func (rd *rankDataHeap) Len() int {
return len(rd.rankDatas)
}
func (rd *rankDataHeap) Less(i, j int) bool {
return rd.rankDatas[i].RefreshTimestamp < rd.rankDatas[j].RefreshTimestamp
}
func (rd *rankDataHeap) Swap(i, j int) {
rd.rankDatas[i], rd.rankDatas[j] = rd.rankDatas[j], rd.rankDatas[i]
rd.rankDatas[i].Index,rd.rankDatas[j].Index = i,j
}
func (rd *rankDataHeap) Push(x interface{}) {
ed := x.(*ExpireData)
ed.Index = len(rd.rankDatas)
rd.rankDatas = append(rd.rankDatas,ed)
}
func (rd *rankDataHeap) Pop() (ret interface{}) {
l := len(rd.rankDatas)
var retData *ExpireData
rd.rankDatas, retData = rd.rankDatas[:l-1], rd.rankDatas[l-1]
retData.Index = -1
ret = retData
return
}
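// PopExpireKey pops the oldest entry and returns its key once it has been unchanged for longer than expireMs; it returns 0 when the heap is empty or nothing has expired yet.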
func (rd *rankDataHeap) PopExpireKey() uint64 {
if rd.Len() <= 0 {
return 0
}
if rd.rankDatas[0].RefreshTimestamp+rd.expireMs > time.Now().UnixNano() {
return 0
}
rankData := heap.Pop(rd).(*ExpireData)
delete(rd.mapExpireData, rankData.Key)
key := rankData.Key
//return the popped entry to the pool so it can be reused
expireDataPool.Put(rankData)
return key
}
func (rd *rankDataHeap) PushOrRefreshExpireKey(key uint64, refreshTimestamp int64) {
//1. if the key is already tracked, just refresh its timestamp and fix the heap
expData, ok := rd.mapExpireData[key]
if ok {
expData.RefreshTimestamp = refreshTimestamp
heap.Fix(rd, expData.Index)
return
}
//2. otherwise take a new entry from the pool and push it
expData = expireDataPool.Get().(*ExpireData)
expData.Key = key
expData.RefreshTimestamp = refreshTimestamp
rd.mapExpireData[key] = expData
heap.Push(rd, expData)
}
func (rd *rankDataHeap) RemoveExpireKey(key uint64) {
expData, ok := rd.mapExpireData[key]
if !ok {
return
}
delete(rd.mapExpireData, key)
heap.Remove(rd, expData.Index)
expireDataPool.Put(expData)
}

@@ -0,0 +1,52 @@
package rankservice
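// transformLevel maps a configured bit width (8/16/32/64) to the zero value of the matching unsigned integer type; unknown widths fall back to uint32.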
func transformLevel(level int32) interface{} {
switch level {
case 8:
return uint8(0)
case 16:
return uint16(0)
case 32:
return uint32(0)
case 64:
return uint64(0)
default:
return uint32(0)
}
}
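// compareIsEqual reports whether two sort vectors have the same length and identical elements.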
func compareIsEqual(firstSortData, secondSortData []int64) bool {
firstLen := len(firstSortData)
if firstLen != len(secondSortData) {
return false
}
for i := firstLen - 1; i >= 0; i-- {
if firstSortData[i] != secondSortData[i] {
return false
}
}
return true
}
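// compareMoreThan compares two sort vectors element by element over their common length, returning 1 if the first is greater, -1 if it is smaller and 0 if they are equal so far.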
func compareMoreThan(firstSortData, secondSortData []int64) int {
firstLen := len(firstSortData)
secondLen := len(secondSortData)
minLen := firstLen
if firstLen > secondLen {
minLen = secondLen
}
for i := 0; i < minLen; i++ {
if firstSortData[i] > secondSortData[i] {
return 1
}
if firstSortData[i] < secondSortData[i] {
return -1
}
}
return 0
}

@@ -0,0 +1,47 @@
package rankservice
import (
"github.com/duanhf2012/origin/v2/service"
"github.com/duanhf2012/origin/v2/rpc"
)
type RankDataChangeType int8
type IRankSkip interface {
GetRankID() uint64
GetRankName() string
GetRankLen() uint64
UpsetRank(upsetData *rpc.RankData,refreshTimestamp int64,fromLoad bool) RankDataChangeType
}
type IRankModule interface {
service.IModule
OnSetupRank(manual bool, rankSkip *RankSkip) error //called after a rank object has been set up
OnStart() //called when the service starts
OnEnterRank(rankSkip IRankSkip, enterData *RankData) //called when data enters the rank
OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) //called when data leaves the rank
OnChangeRankData(rankSkip IRankSkip, changeData *RankData) //called when rank data changes
OnStop(mapRankSkip map[uint64]*RankSkip) //called when the service stops
}
type DefaultRankModule struct {
service.Module
}
func (dr *DefaultRankModule) OnSetupRank(manual bool, rankSkip *RankSkip) error {
return nil
}
func (dr *DefaultRankModule) OnStart() {
}
func (dr *DefaultRankModule) OnEnterRank(rankSkip IRankSkip, enterData *RankData) {
}
func (dr *DefaultRankModule) OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) {
}
func (dr *DefaultRankModule) OnChangeRankData(rankSkip IRankSkip, changeData *RankData) {
}
func (dr *DefaultRankModule) OnStop(mapRankSkip map[uint64]*RankSkip) {
}
