Compare commits

...

31 Commits

Author SHA1 Message Date
duanhf2012
0f3a965d73 Add and improve the README notes on the RPC timeout feature 2023-08-01 11:05:51 +08:00
duanhf2012
dfb6959843 Tune the message queue's default push timeout 2023-07-28 17:43:39 +08:00
duanhf2012
dd4aaf9c57 Improve RPC timeouts 2023-07-28 17:38:52 +08:00
duanhf2012
6ef98a2104 Refine the new RPC compression and custom timeout features 2023-07-25 17:36:47 +08:00
duanhf2012
1890b300ee Refine the new RPC compression feature 2023-07-25 15:13:58 +08:00
duanhf2012
6fea2226e1 Improve the protocol parser 2023-07-21 17:03:15 +08:00
duanhf2012
ec1c2b4517 Support RPC compression in node 2023-07-21 15:28:52 +08:00
duanhf2012
4b84d9a1d5 Add custom RPC timeouts 2023-07-13 16:42:23 +08:00
duanhf2012
85a8ec58e5 Add compression to RPC 2023-07-10 14:22:23 +08:00
duanhf2012
962016d476 Improve RPC 2023-07-07 13:50:57 +08:00
duanhf2012
a61979e985 Improve message queue service persistence 2023-05-17 11:26:29 +08:00
duanhf2012
6de25d1c6d Improve rankservice error returns 2023-05-09 14:34:33 +08:00
duanhf2012
b392617d6e Improve performance profiling and rankservice persistence 2023-05-09 14:06:17 +08:00
duanhf2012
92fdb7860c Improve service RPC within the local node 2023-05-04 17:53:42 +08:00
duanhf2012
f78d0d58be Improve RPC and rankservice persistence 2023-05-04 17:35:40 +08:00
duanhf2012
5675681ab1 Improve the concurrent and rpc modules 2023-05-04 14:21:29 +08:00
duanhf2012
ddeaaf7d77 Improve the concurrent module 2023-04-11 10:29:06 +08:00
duanhf2012
1174b47475 Extend the IService interface with IConcurrent 2023-04-04 16:36:05 +08:00
duanhf2012
18fff3b567 Improve the concurrent module: the task's return value now controls whether the callback runs 2023-03-31 15:12:27 +08:00
duanhf2012
7ab6c88f9c Clean up and improve RPC 2023-03-23 10:06:41 +08:00
duanhf2012
6b64de06a2 Add a packet length-field configuration to TcpService 2023-03-22 14:59:22 +08:00
duanhf2012
95b153f8cf Improve automatic calculation of the network packet length field 2023-03-20 15:20:04 +08:00
duanhf2012
f3ff09b90f Improve RPC call error logging
Require that every configured service is installed
Remove a node's registration when it disconnects
2023-03-17 12:09:00 +08:00
duanhf2012
f9738fb9d0 Merge branch 'master' of https://github.com/duanhf2012/origin 2023-03-06 15:57:33 +08:00
duanhf2012
91e773aa8c Document that RPC function names may also use the RPCFunctionName format 2023-03-06 15:57:23 +08:00
origin
c9b96404f4 Merge pull request #873 from duanhf2012/dependabot/go_modules/golang.org/x/crypto-0.1.0
Bump golang.org/x/crypto from 0.0.0-20201216223049-8b5274cf687f to 0.1.0
2023-03-06 15:45:40 +08:00
duanhf2012
aaae63a674 Add support for RPC function names in the RPCXXX format 2023-03-06 15:41:51 +08:00
duanhf2012
47dc21aee1 Report an error when RPC reply and request parameters do not match 2023-03-06 11:47:23 +08:00
dependabot[bot]
4d09532801 Bump golang.org/x/crypto from 0.0.0-20201216223049-8b5274cf687f to 0.1.0
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.0.0-20201216223049-8b5274cf687f to 0.1.0.
- [Release notes](https://github.com/golang/crypto/releases)
- [Commits](https://github.com/golang/crypto/commits/v0.1.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-06 02:26:33 +00:00
origin
d3ad7fc898 Merge pull request #871 from duanhf2012/dependabot/go_modules/golang.org/x/text-0.3.8
Bump golang.org/x/text from 0.3.6 to 0.3.8
2023-03-06 10:08:56 +08:00
dependabot[bot]
ba2b0568b2 Bump golang.org/x/text from 0.3.6 to 0.3.8
Bumps [golang.org/x/text](https://github.com/golang/text) from 0.3.6 to 0.3.8.
- [Release notes](https://github.com/golang/text/releases)
- [Commits](https://github.com/golang/text/compare/v0.3.6...v0.3.8)

---
updated-dependencies:
- dependency-name: golang.org/x/text
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-02-23 07:30:17 +00:00
32 changed files with 868 additions and 352 deletions

View File

@@ -64,6 +64,7 @@ cluster.json is as follows
"Private": false,
"ListenAddr":"127.0.0.1:8001",
"MaxRpcParamLen": 409600,
"CompressBytesLen": 20480,
"NodeName": "Node_Test1",
"remark":"//以_打头的表示只在本机进程不对整个子网公开",
"ServiceList": ["TestService1","TestService2","TestServiceCall","GateService","_TcpService","HttpService","WSService"]
@@ -72,7 +73,8 @@ cluster.json is as follows
"NodeId": 2,
"Private": false,
"ListenAddr":"127.0.0.1:8002",
"MaxRpcParamLen": 409600,
"MaxRpcParamLen": 409600,
"CompressBytesLen": 20480,
"NodeName": "Node_Test1",
"remark":"//以_打头的表示只在本机进程不对整个子网公开",
"ServiceList": ["TestService1","TestService2","TestServiceCall","GateService","TcpService","HttpService","WSService"]
@@ -88,6 +90,7 @@ cluster.json is as follows
* Private: whether the node is private. If true, other nodes will not discover it, but it can still run on its own.
* ListenAddr: the listen address of the RPC communication service.
* MaxRpcParamLen: maximum length of an RPC parameter packet. Optional; by default a single RPC call supports up to 4294967295 bytes of data.
* CompressBytesLen: RPC network-data compression threshold; with the value above, payloads of 20480 bytes or more are compressed. Optional; when omitted or set to 0, no compression is performed.
* NodeName: the node name.
* remark: a remark; optional.
* ServiceList: the list of services owned by this node. Note that origin installs and initializes them in the configured order; services are stopped in the reverse order.
@@ -667,6 +670,7 @@ type InputData struct {
B int
}
// Note: RPC function names must use the RPC_FunctionName or RPCFunctionName format; RPC_Sum below could also be written as RPCSum
func (slf *TestService6) RPC_Sum(input *InputData,output *int) error{
*output = input.A+input.B
return nil
@@ -714,6 +718,15 @@ func (slf *TestService7) CallTest(){
}else{
fmt.Printf("Call output %d\n",output)
}
//Custom timeout; the default RPC timeout is 15s
err = slf.CallWithTimeout(time.Second*1, "TestService6.RPC_Sum", &input, &output)
if err != nil {
fmt.Printf("Call error :%+v\n", err)
} else {
fmt.Printf("Call output %d\n", output)
}
}
@@ -725,13 +738,27 @@ func (slf *TestService7) AsyncCallTest(){
})*/
//Asynchronous call: when the result comes back, the passed-in function is called
//Note: the callback's first parameter is always RPC_Sum's second parameter, and its err error parameter is RPC_Sum's return value
slf.AsyncCall("TestService6.RPC_Sum",&input,func(output *int,err error){
err := slf.AsyncCall("TestService6.RPC_Sum", &input, func(output *int, err error) {
if err != nil {
fmt.Printf("AsyncCall error :%+v\n",err)
}else{
fmt.Printf("AsyncCall output %d\n",*output)
fmt.Printf("AsyncCall error :%+v\n", err)
} else {
fmt.Printf("AsyncCall output %d\n", *output)
}
})
fmt.Println(err)
//Custom timeout; a cancel function is returned so the RPC call can be cancelled when the business logic needs to
rpcCancel, err := slf.AsyncCallWithTimeout(time.Second*1, "TestService6.RPC_Sum", &input, func(output *int, err error) {
//if the commented-out rpcCancel() call below is invoked, this callback may never fire
if err != nil {
fmt.Printf("AsyncCall error :%+v\n", err)
} else {
fmt.Printf("AsyncCall output %d\n", *output)
}
})
//rpcCancel()
fmt.Println(err, rpcCancel)
}
func (slf *TestService7) GoTest(){
@@ -766,34 +793,57 @@ func (slf *TestService7) GoTest(){
//slf.OpenConcurrent(5, 10, 1000000)
```
For ordinary calls, the following methods can be used:
A usage example follows:
```
func (slf *TestService1) testAsyncDo() {
func (slf *TestService13) testAsyncDo() {
var context struct {
data int64
}
slf.AsyncDo(func() {
//1. Example: basic usage
//The function in the first parameter runs in the goroutine pool; when it finishes, a completion event is queued to the service's worker goroutine,
//and the function in the second parameter then runs in the service goroutine, so it is goroutine-safe.
slf.AsyncDo(func() bool {
//this callback runs in the goroutine pool
context.data = 100
return true
}, func(err error) {
//this function runs in the service goroutine
fmt.Print(context.data) //prints 100
})
}
```
These methods throw the function into a task channel, where the goroutine pool competes to execute it. Some tasks, however, must run in a fixed order; for those, the following method can be used:
```
func (slf *TestService1) testAsyncDoByQueue() {
queueId := int64(1)
//2. Example: queued, in-order execution
//The first parameter is a queue id; tasks with the same queue id are executed one after another in the goroutine pool
//The two calls below both pass queueId 1, so both are queued and executed in order under queueId 1
queueId := int64(1)
for i := 0; i < 2; i++ {
slf.AsyncDoByQueue(queueId, func() {
slf.AsyncDoByQueue(queueId, func() bool {
//this function is invoked twice, but the invocations are queued
return true
}, func(err error) {
//this function runs in the service goroutine
})
}
//3. Either one of the two function parameters may be nil
//With a nil first parameter, the second function is simply deferred
slf.AsyncDo(nil, func(err error) {
//runs later in the service goroutine
})
//The first function runs in the goroutine pool, but no callback happens in the service goroutine
slf.AsyncDo(func() bool {
return true
}, nil)
//4. The function's return value controls whether the callback runs
slf.AsyncDo(func() bool {
//when false is returned, the second function is not executed; when true, it is
return false
}, func(err error) {
//this function will not be executed
})
}
```

View File

@@ -26,6 +26,7 @@ type NodeInfo struct {
Private bool
ListenAddr string
MaxRpcParamLen uint32 //maximum RPC parameter length
CompressBytesLen int //payloads at or above this byte length are compressed
ServiceList []string //ordered list of all services
PublicServiceList []string //list of services exposed to other nodes
DiscoveryService []string //services to filter during discovery; if not configured, no filtering is done
@@ -73,7 +74,7 @@ func SetServiceDiscovery(serviceDiscovery IServiceDiscovery) {
}
func (cls *Cluster) Start() {
cls.rpcServer.Start(cls.localNodeInfo.ListenAddr, cls.localNodeInfo.MaxRpcParamLen)
cls.rpcServer.Start(cls.localNodeInfo.ListenAddr, cls.localNodeInfo.MaxRpcParamLen,cls.localNodeInfo.CompressBytesLen)
}
func (cls *Cluster) Stop() {
@@ -195,7 +196,7 @@ func (cls *Cluster) serviceDiscoverySetNodeInfo(nodeInfo *NodeInfo) {
rpcInfo := NodeRpcInfo{}
rpcInfo.nodeInfo = *nodeInfo
rpcInfo.client =rpc.NewRClient(nodeInfo.NodeId, nodeInfo.ListenAddr, nodeInfo.MaxRpcParamLen,cls.triggerRpcEvent)
rpcInfo.client =rpc.NewRClient(nodeInfo.NodeId, nodeInfo.ListenAddr, nodeInfo.MaxRpcParamLen,cls.localNodeInfo.CompressBytesLen,cls.triggerRpcEvent)
cls.mapRpc[nodeInfo.NodeId] = rpcInfo
}

View File

@@ -5,6 +5,8 @@ import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"time"
"github.com/duanhf2012/origin/util/timer"
)
const DynamicDiscoveryMasterName = "DiscoveryMaster"
@@ -60,6 +62,21 @@ func (ds *DynamicDiscoveryMaster) addNodeInfo(nodeInfo *rpc.NodeInfo) {
ds.nodeInfo = append(ds.nodeInfo, nodeInfo)
}
func (ds *DynamicDiscoveryMaster) removeNodeInfo(nodeId int32) {
if _,ok:= ds.mapNodeInfo[nodeId];ok == false {
return
}
for i:=0;i<len(ds.nodeInfo);i++ {
if ds.nodeInfo[i].NodeId == nodeId {
ds.nodeInfo = append(ds.nodeInfo[:i],ds.nodeInfo[i+1:]...)
break
}
}
delete(ds.mapNodeInfo,nodeId)
}
func (ds *DynamicDiscoveryMaster) OnInit() error {
ds.mapNodeInfo = make(map[int32]struct{}, 20)
ds.RegRpcListener(ds)
@@ -103,6 +120,8 @@ func (ds *DynamicDiscoveryMaster) OnNodeDisconnect(nodeId int) {
return
}
ds.removeNodeInfo(int32(nodeId))
var notifyDiscover rpc.SubscribeDiscoverNotify
notifyDiscover.MasterNodeId = int32(cluster.GetLocalNodeInfo().NodeId)
notifyDiscover.DelNodeId = int32(nodeId)
@@ -324,6 +343,10 @@ func (dc *DynamicDiscoveryClient) isDiscoverNode(nodeId int) bool {
}
func (dc *DynamicDiscoveryClient) OnNodeConnected(nodeId int) {
dc.regServiceDiscover(nodeId)
}
func (dc *DynamicDiscoveryClient) regServiceDiscover(nodeId int){
nodeInfo := cluster.GetMasterDiscoveryNodeInfo(nodeId)
if nodeInfo == nil {
return
@@ -347,6 +370,10 @@ func (dc *DynamicDiscoveryClient) OnNodeConnected(nodeId int) {
err := dc.AsyncCallNode(nodeId, RegServiceDiscover, &req, func(res *rpc.Empty, err error) {
if err != nil {
log.SError("call ", RegServiceDiscover, " is fail :", err.Error())
dc.AfterFunc(time.Second*3, func(timer *timer.Timer) {
dc.regServiceDiscover(nodeId)
})
return
}
})

View File

@@ -12,8 +12,8 @@ const defaultMaxTaskChannelNum = 1000000
type IConcurrent interface {
OpenConcurrentByNumCPU(cpuMul float32)
OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32, maxTaskChannelNum int)
AsyncDoByQueue(queueId int64, fn func(), cb func(err error))
AsyncDo(f func(), cb func(err error))
AsyncDoByQueue(queueId int64, fn func() bool, cb func(err error))
AsyncDo(f func() bool, cb func(err error))
}
type Concurrent struct {
@@ -40,11 +40,11 @@ func (c *Concurrent) OpenConcurrent(minGoroutineNum int32, maxGoroutineNum int32
c.dispatch.open(minGoroutineNum, maxGoroutineNum, c.tasks, c.cbChannel)
}
func (c *Concurrent) AsyncDo(f func(), cb func(err error)) {
func (c *Concurrent) AsyncDo(f func() bool, cb func(err error)) {
c.AsyncDoByQueue(0, f, cb)
}
func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func(), cb func(err error)) {
func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func() bool, cb func(err error)) {
if cap(c.tasks) == 0 {
panic("not open concurrent")
}
@@ -54,16 +54,6 @@ func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func(), cb func(err error)
return
}
if len(c.tasks) > cap(c.tasks) {
log.SError("tasks channel is full")
if cb != nil {
c.pushAsyncDoCallbackEvent(func(err error) {
cb(errors.New("tasks channel is full"))
})
}
return
}
if fn == nil {
c.pushAsyncDoCallbackEvent(cb)
return
@@ -75,6 +65,14 @@ func (c *Concurrent) AsyncDoByQueue(queueId int64, fn func(), cb func(err error)
select {
case c.tasks <- task{queueId, fn, cb}:
default:
log.SError("tasks channel is full")
if cb != nil {
c.pushAsyncDoCallbackEvent(func(err error) {
cb(errors.New("tasks channel is full"))
})
}
return
}
}
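
The hunk above replaces the earlier `len(c.tasks) > cap(c.tasks)` pre-check with a non-blocking send: the task goes into the channel inside a `select`, and the `default` branch reports a full channel. A minimal, self-contained sketch of that pattern follows; the `task` shape matches the diff, but the channel size and the direct `cb` call are illustrative stand-ins for the module's real `pushAsyncDoCallbackEvent` plumbing.
```
package main

import (
	"errors"
	"fmt"
)

// task mirrors the shape used in the diff: a queue id, a work function whose
// bool result decides whether the callback runs, and the callback itself.
type task struct {
	queueId int64
	fn      func() bool
	cb      func(err error)
}

// tryEnqueue performs a non-blocking send. If the channel is full, the select
// falls through to default and the caller is told via cb instead of blocking.
func tryEnqueue(tasks chan task, t task) bool {
	select {
	case tasks <- t:
		return true
	default:
		if t.cb != nil {
			t.cb(errors.New("tasks channel is full"))
		}
		return false
	}
}

func main() {
	tasks := make(chan task, 1)
	ok := tryEnqueue(tasks, task{fn: func() bool { return true }})
	fmt.Println(ok) // true: there was room in the channel
	ok = tryEnqueue(tasks, task{cb: func(err error) { fmt.Println(err) }})
	fmt.Println(ok) // false: the channel was already full, cb received the error
}
```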

View File

@@ -12,7 +12,7 @@ import (
"github.com/duanhf2012/origin/util/queue"
)
var idleTimeout = 2 * time.Second
var idleTimeout = int64(2 * time.Second)
const maxTaskQueueSessionId = 10000
type dispatch struct {
@@ -47,7 +47,7 @@ func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan
func (d *dispatch) run() {
defer d.waitDispatch.Done()
timeout := time.NewTimer(idleTimeout)
timeout := time.NewTimer(time.Duration(atomic.LoadInt64(&idleTimeout)))
for {
select {
@@ -65,9 +65,9 @@ func (d *dispatch) run() {
case <-timeout.C:
d.processTimer()
if atomic.LoadInt32(&d.minConcurrentNum) == -1 && len(d.tasks) == 0 {
idleTimeout = time.Millisecond * 10
atomic.StoreInt64(&idleTimeout,int64(time.Millisecond * 10))
}
timeout.Reset(idleTimeout)
timeout.Reset(time.Duration(atomic.LoadInt64(&idleTimeout)))
}
}
@@ -80,7 +80,7 @@ func (d *dispatch) run() {
}
func (d *dispatch) processTimer() {
if d.idle == true && d.workerNum > d.minConcurrentNum {
if d.idle == true && d.workerNum > atomic.LoadInt32(&d.minConcurrentNum) {
d.processIdle()
}

View File

@@ -12,7 +12,7 @@ import (
type task struct {
queueId int64
fn func()
fn func() bool
cb func(err error)
}
@@ -60,17 +60,18 @@ func (w *worker) exec(t *task) {
cb(errors.New(errString))
}
w.endCallFun(t)
w.endCallFun(true,t)
log.SError("core dump info[", errString, "]\n", string(buf[:l]))
}
}()
t.fn()
w.endCallFun(t)
w.endCallFun(t.fn(),t)
}
func (w *worker) endCallFun(t *task) {
w.pushAsyncDoCallbackEvent(t.cb)
func (w *worker) endCallFun(isDocallBack bool,t *task) {
if isDocallBack {
w.pushAsyncDoCallbackEvent(t.cb)
}
if t.queueId != 0 {
w.pushQueueTaskFinishEvent(t.queueId)

4
go.mod
View File

@@ -23,8 +23,8 @@ require (
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/text v0.4.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

7
go.sum
View File

@@ -58,8 +58,9 @@ go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCu
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -79,8 +80,8 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

View File

@@ -68,6 +68,11 @@ func (pbProcessor *PBProcessor) MsgRoute(clientId uint64, msg interface{}) error
// must goroutine safe
func (pbProcessor *PBProcessor) Unmarshal(clientId uint64, data []byte) (interface{}, error) {
defer pbProcessor.ReleaseByteSlice(data)
return pbProcessor.UnmarshalWithOutRelease(clientId, data)
}
// unmarshal but not release data
func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId uint64, data []byte) (interface{}, error) {
var msgType uint16
if pbProcessor.LittleEndian == true {
msgType = binary.LittleEndian.Uint16(data[:2])

View File

@@ -16,7 +16,7 @@ type memAreaPool struct {
pool []sync.Pool
}
var memAreaPoolList = [3]*memAreaPool{&memAreaPool{minAreaValue: 1, maxAreaValue: 4096, growthValue: 512}, &memAreaPool{minAreaValue: 4097, maxAreaValue: 40960, growthValue: 4096}, &memAreaPool{minAreaValue: 40961, maxAreaValue: 417792, growthValue: 16384}}
var memAreaPoolList = [4]*memAreaPool{&memAreaPool{minAreaValue: 1, maxAreaValue: 4096, growthValue: 512}, &memAreaPool{minAreaValue: 4097, maxAreaValue: 40960, growthValue: 4096}, &memAreaPool{minAreaValue: 40961, maxAreaValue: 417792, growthValue: 16384}, &memAreaPool{minAreaValue: 417793, maxAreaValue: 1925120, growthValue: 65536}}
func init() {
for i := 0; i < len(memAreaPoolList); i++ {

View File

@@ -64,20 +64,24 @@ func (client *TCPClient) init() {
if client.cons != nil {
log.SFatal("client is running")
}
if client.LenMsgLen == 0 {
client.LenMsgLen = Default_LenMsgLen
}
if client.MinMsgLen == 0 {
client.MinMsgLen = Default_MinMsgLen
}
if client.MaxMsgLen == 0 {
client.MaxMsgLen = Default_MaxMsgLen
}
if client.LenMsgLen ==0 {
client.LenMsgLen = Default_LenMsgLen
}
maxMsgLen := client.MsgParser.getMaxMsgLen(client.LenMsgLen)
if client.MaxMsgLen > maxMsgLen {
client.MaxMsgLen = maxMsgLen
log.SRelease("invalid MaxMsgLen, reset to ", maxMsgLen)
}
client.cons = make(ConnSet)
client.closeFlag = false
// msg parser
client.MsgParser.init()
}

View File

@@ -1,11 +1,12 @@
package network
import (
"errors"
"github.com/duanhf2012/origin/log"
"net"
"sync"
"sync/atomic"
"time"
"errors"
)
type ConnSet map[net.Conn]struct{}
@@ -14,7 +15,7 @@ type TCPConn struct {
sync.Mutex
conn net.Conn
writeChan chan []byte
closeFlag bool
closeFlag int32
msgParser *MsgParser
}
@@ -49,7 +50,7 @@ func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser,writeDe
conn.Close()
tcpConn.Lock()
freeChannel(tcpConn)
tcpConn.closeFlag = true
atomic.StoreInt32(&tcpConn.closeFlag,1)
tcpConn.Unlock()
}()
@@ -60,9 +61,9 @@ func (tcpConn *TCPConn) doDestroy() {
tcpConn.conn.(*net.TCPConn).SetLinger(0)
tcpConn.conn.Close()
if !tcpConn.closeFlag {
if atomic.LoadInt32(&tcpConn.closeFlag)==0 {
close(tcpConn.writeChan)
tcpConn.closeFlag = true
atomic.StoreInt32(&tcpConn.closeFlag,1)
}
}
@@ -76,12 +77,12 @@ func (tcpConn *TCPConn) Destroy() {
func (tcpConn *TCPConn) Close() {
tcpConn.Lock()
defer tcpConn.Unlock()
if tcpConn.closeFlag {
if atomic.LoadInt32(&tcpConn.closeFlag)==1 {
return
}
tcpConn.doWrite(nil)
tcpConn.closeFlag = true
atomic.StoreInt32(&tcpConn.closeFlag,1)
}
func (tcpConn *TCPConn) GetRemoteIp() string {
@@ -104,7 +105,7 @@ func (tcpConn *TCPConn) doWrite(b []byte) error{
func (tcpConn *TCPConn) Write(b []byte) error{
tcpConn.Lock()
defer tcpConn.Unlock()
if tcpConn.closeFlag || b == nil {
if atomic.LoadInt32(&tcpConn.closeFlag)==1 || b == nil {
tcpConn.ReleaseReadMsg(b)
return errors.New("conn is close")
}
@@ -133,14 +134,14 @@ func (tcpConn *TCPConn) ReleaseReadMsg(byteBuff []byte){
}
func (tcpConn *TCPConn) WriteMsg(args ...[]byte) error {
if tcpConn.closeFlag == true {
if atomic.LoadInt32(&tcpConn.closeFlag) == 1 {
return errors.New("conn is close")
}
return tcpConn.msgParser.Write(tcpConn, args...)
}
func (tcpConn *TCPConn) WriteRawMsg(args []byte) error {
if tcpConn.closeFlag == true {
if atomic.LoadInt32(&tcpConn.closeFlag) == 1 {
return errors.New("conn is close")
}
@@ -149,7 +150,7 @@ func (tcpConn *TCPConn) WriteRawMsg(args []byte) error {
func (tcpConn *TCPConn) IsConnected() bool {
return tcpConn.closeFlag == false
return atomic.LoadInt32(&tcpConn.closeFlag) == 0
}
func (tcpConn *TCPConn) SetReadDeadline(d time.Duration) {

View File

@@ -20,30 +20,22 @@ type MsgParser struct {
}
func (p *MsgParser) init(){
var max uint32
func (p *MsgParser) getMaxMsgLen(lenMsgLen int) uint32 {
switch p.LenMsgLen {
case 1:
max = math.MaxUint8
return math.MaxUint8
case 2:
max = math.MaxUint16
return math.MaxUint16
case 4:
max = math.MaxUint32
return math.MaxUint32
default:
panic("LenMsgLen value must be 1 or 2 or 4")
}
if p.MinMsgLen > max {
p.MinMsgLen = max
}
if p.MaxMsgLen > max {
p.MaxMsgLen = max
}
p.INetMempool = NewMemAreaPool()
}
func (p *MsgParser) init(){
p.INetMempool = NewMemAreaPool()
}
// goroutine safe
func (p *MsgParser) Read(conn *TCPConn) ([]byte, error) {

View File

@@ -7,14 +7,16 @@ import (
"time"
)
const Default_ReadDeadline = time.Second*30 //30s
const Default_WriteDeadline = time.Second*30 //30s
const Default_MaxConnNum = 9000
const Default_PendingWriteNum = 10000
const Default_LittleEndian = false
const Default_MinMsgLen = 2
const Default_MaxMsgLen = 65535
const Default_LenMsgLen = 2
const(
Default_ReadDeadline = time.Second*30 //default read timeout: 30s
Default_WriteDeadline = time.Second*30 //default write timeout: 30s
Default_MaxConnNum = 1000000 //default maximum number of connections
Default_PendingWriteNum = 100000 //per-connection write-message channel capacity
Default_LittleEndian = false //default endianness
Default_MinMsgLen = 2 //minimum message length: 2 bytes
Default_LenMsgLen = 2 //the length field in the packet header occupies 2 bytes
Default_MaxMsgLen = 65535 //maximum message length
)
type TCPServer struct {
Addr string
@@ -29,8 +31,7 @@ type TCPServer struct {
mutexConns sync.Mutex
wgLn sync.WaitGroup
wgConns sync.WaitGroup
// msg parser
MsgParser
}
@@ -49,14 +50,15 @@ func (server *TCPServer) init() {
server.MaxConnNum = Default_MaxConnNum
log.SRelease("invalid MaxConnNum, reset to ", server.MaxConnNum)
}
if server.PendingWriteNum <= 0 {
server.PendingWriteNum = Default_PendingWriteNum
log.SRelease("invalid PendingWriteNum, reset to ", server.PendingWriteNum)
}
if server.MinMsgLen <= 0 {
server.MinMsgLen = Default_MinMsgLen
log.SRelease("invalid MinMsgLen, reset to ", server.MinMsgLen)
if server.LenMsgLen <= 0 {
server.LenMsgLen = Default_LenMsgLen
log.SRelease("invalid LenMsgLen, reset to ", server.LenMsgLen)
}
if server.MaxMsgLen <= 0 {
@@ -64,6 +66,17 @@ func (server *TCPServer) init() {
log.SRelease("invalid MaxMsgLen, reset to ", server.MaxMsgLen)
}
maxMsgLen := server.MsgParser.getMaxMsgLen(server.LenMsgLen)
if server.MaxMsgLen > maxMsgLen {
server.MaxMsgLen = maxMsgLen
log.SRelease("invalid MaxMsgLen, reset to ", maxMsgLen)
}
if server.MinMsgLen <= 0 {
server.MinMsgLen = Default_MinMsgLen
log.SRelease("invalid MinMsgLen, reset to ", server.MinMsgLen)
}
if server.WriteDeadline == 0 {
server.WriteDeadline = Default_WriteDeadline
log.SRelease("invalid WriteDeadline, reset to ", server.WriteDeadline.Seconds(),"s")
@@ -74,18 +87,12 @@ func (server *TCPServer) init() {
log.SRelease("invalid ReadDeadline, reset to ", server.ReadDeadline.Seconds(),"s")
}
if server.LenMsgLen == 0 {
server.LenMsgLen = Default_LenMsgLen
}
if server.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
}
server.ln = ln
server.conns = make(ConnSet)
server.INetMempool = NewMemAreaPool()
server.MsgParser.init()
}

View File

@@ -155,16 +155,21 @@ func initNode(id int) {
//2. Install the services in order
serviceOrder := cluster.GetCluster().GetLocalNodeInfo().ServiceList
for _,serviceName:= range serviceOrder{
bSetup := false
for _, s := range preSetupService {
if s.GetName() != serviceName {
continue
}
bSetup = true
pServiceCfg := cluster.GetCluster().GetServiceCfg(s.GetName())
s.Init(s, cluster.GetRpcClient, cluster.GetRpcServer, pServiceCfg)
service.Setup(s)
}
if bSetup == false {
log.SFatal("Service name "+serviceName+" configuration error")
}
}
//3. Service initialization
@@ -290,9 +295,9 @@ func GetService(serviceName string) service.IService {
return service.GetService(serviceName)
}
func SetConfigDir(configDir string) {
configDir = configDir
cluster.SetConfigDir(configDir)
func SetConfigDir(cfgDir string) {
configDir = cfgDir
cluster.SetConfigDir(cfgDir)
}
func GetConfigDir() string {

View File

@@ -193,9 +193,11 @@ func Report() {
record = prof.record
prof.record = list.New()
callNum := prof.callNum
totalCostTime := prof.totalCostTime
prof.stackLocker.RUnlock()
DefaultReportFunction(name,prof.callNum,prof.totalCostTime,record)
DefaultReportFunction(name,callNum,totalCostTime,record)
}
}

View File

@@ -1,7 +1,6 @@
package rpc
import (
"container/list"
"errors"
"github.com/duanhf2012/origin/network"
"reflect"
@@ -9,25 +8,31 @@ import (
"sync"
"sync/atomic"
"time"
"github.com/duanhf2012/origin/log"
)
const(
DefaultRpcConnNum = 1
DefaultRpcLenMsgLen = 4
DefaultRpcMinMsgLen = 2
DefaultMaxCheckCallRpcCount = 1000
DefaultMaxPendingWriteNum = 200000
DefaultConnectInterval = 2*time.Second
DefaultCheckRpcCallTimeoutInterval = 1*time.Second
DefaultRpcTimeout = 15*time.Second
)
const MaxCheckCallRpcCount = 1000
const MaxPendingWriteNum = 200000
const ConnectInterval = 2*time.Second
const RpcConnNum = 1
const RpcLenMsgLen = 4
const RpcMinMsgLen = 2
const CheckRpcCallTimeoutInterval = 5*time.Second
const DefaultRpcTimeout = 15*time.Second
var clientSeq uint32
type IRealClient interface {
SetConn(conn *network.TCPConn)
Close(waitDone bool)
AsyncCall(rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) error
Go(rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call
RawGo(rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call
AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error)
Go(timeout time.Duration,rpcHandler IRpcHandler, noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call
RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call
IsConnected() bool
Run()
@@ -39,11 +44,11 @@ type Client struct {
nodeId int
pendingLock sync.RWMutex
startSeq uint64
pending map[uint64]*list.Element
pendingTimer *list.List
pending map[uint64]*Call
callRpcTimeout time.Duration
maxCheckCallRpcCount int
callTimerHeap CallTimerHeap
IRealClient
}
@@ -54,7 +59,6 @@ func (client *Client) NewClientAgent(conn *network.TCPConn) network.Agent {
}
func (bc *Client) makeCallFail(call *Call) {
bc.removePending(call.Seq)
if call.callback != nil && call.callback.IsValid() {
call.rpcHandler.PushRpcResponse(call)
} else {
@@ -64,55 +68,53 @@ func (bc *Client) makeCallFail(call *Call) {
func (bc *Client) checkRpcCallTimeout() {
for{
time.Sleep(CheckRpcCallTimeoutInterval)
now := time.Now()
time.Sleep(DefaultCheckRpcCallTimeoutInterval)
for i := 0; i < bc.maxCheckCallRpcCount; i++ {
bc.pendingLock.Lock()
if bc.pendingTimer == nil {
callSeq := bc.callTimerHeap.PopTimeout()
if callSeq == 0 {
bc.pendingLock.Unlock()
break
}
pElem := bc.pendingTimer.Front()
if pElem == nil {
bc.pendingLock.Unlock()
break
}
pCall := pElem.Value.(*Call)
if now.Sub(pCall.callTime) > bc.callRpcTimeout {
strTimeout := strconv.FormatInt(int64(bc.callRpcTimeout/time.Second), 10)
pCall.Err = errors.New("RPC call takes more than " + strTimeout + " seconds")
bc.makeCallFail(pCall)
pCall := bc.pending[callSeq]
if pCall == nil {
bc.pendingLock.Unlock()
log.SError("callSeq ",callSeq," is not find")
continue
}
delete(bc.pending,callSeq)
strTimeout := strconv.FormatInt(int64(pCall.TimeOut.Seconds()), 10)
pCall.Err = errors.New("RPC call takes more than " + strTimeout + " seconds,method is "+pCall.ServiceMethod)
log.SError(pCall.Err.Error())
bc.makeCallFail(pCall)
bc.pendingLock.Unlock()
break
continue
}
}
}
func (client *Client) InitPending() {
client.pendingLock.Lock()
if client.pending != nil {
for _, v := range client.pending {
v.Value.(*Call).Err = errors.New("node is disconnect")
v.Value.(*Call).done <- v.Value.(*Call)
}
}
client.pending = make(map[uint64]*list.Element, 4096)
client.pendingTimer = list.New()
client.callTimerHeap.Init()
client.pending = make(map[uint64]*Call,4096)
client.pendingLock.Unlock()
}
func (bc *Client) AddPending(call *Call) {
bc.pendingLock.Lock()
call.callTime = time.Now()
elemTimer := bc.pendingTimer.PushBack(call)
bc.pending[call.Seq] = elemTimer //if the send below fails, the entry would otherwise stay here forever
if call.Seq == 0 {
bc.pendingLock.Unlock()
log.SStack("call is error.")
return
}
bc.pending[call.Seq] = call
bc.callTimerHeap.AddTimer(call.Seq,call.TimeOut)
bc.pendingLock.Unlock()
}
@@ -131,30 +133,45 @@ func (bc *Client) removePending(seq uint64) *Call {
if ok == false {
return nil
}
call := v.Value.(*Call)
bc.pendingTimer.Remove(v)
bc.callTimerHeap.Cancel(seq)
delete(bc.pending, seq)
return call
return v
}
func (bc *Client) FindPending(seq uint64) *Call {
func (bc *Client) FindPending(seq uint64) (pCall *Call) {
if seq == 0 {
return nil
}
bc.pendingLock.Lock()
v, ok := bc.pending[seq]
if ok == false {
bc.pendingLock.Unlock()
return nil
}
pCall := v.Value.(*Call)
pCall = bc.pending[seq]
bc.pendingLock.Unlock()
return pCall
}
func (bc *Client) cleanPending(){
bc.pendingLock.Lock()
for {
callSeq := bc.callTimerHeap.PopFirst()
if callSeq == 0 {
break
}
pCall := bc.pending[callSeq]
if pCall == nil {
log.SError("callSeq ",callSeq," is not find")
continue
}
delete(bc.pending,callSeq)
pCall.Err = errors.New("nodeid is disconnect ")
bc.makeCallFail(pCall)
}
bc.pendingLock.Unlock()
}
func (bc *Client) generateSeq() uint64 {
return atomic.AddUint64(&bc.startSeq, 1)
}

102
rpc/compressor.go Normal file
View File

@@ -0,0 +1,102 @@
package rpc
import (
"runtime"
"errors"
"github.com/pierrec/lz4/v4"
"fmt"
"github.com/duanhf2012/origin/network"
)
var memPool network.INetMempool = network.NewMemAreaPool()
type ICompressor interface {
CompressBlock(src []byte) ([]byte, error) //if dst was pre-allocated its memory is used; when nil is passed, memory is allocated internally
UncompressBlock(src []byte) ([]byte, error) //if dst was pre-allocated its memory is used; when nil is passed, memory is allocated internally
CompressBufferCollection(buffer []byte) //reclaim the compressed buffer's memory
UnCompressBufferCollection(buffer []byte) //reclaim the decompressed buffer's memory
}
var compressor ICompressor
func init(){
SetCompressor(&Lz4Compressor{})
}
func SetCompressor(cp ICompressor){
compressor = cp
}
type Lz4Compressor struct {
}
func (lc *Lz4Compressor) CompressBlock(src []byte) (dest []byte, err error) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
err = errors.New("core dump info[" + errString + "]\n" + string(buf[:l]))
}
}()
var c lz4.Compressor
var cnt int
dest = memPool.MakeByteSlice(lz4.CompressBlockBound(len(src))+1)
cnt, err = c.CompressBlock(src, dest[1:])
if err != nil {
memPool.ReleaseByteSlice(dest)
return nil,err
}
ratio := len(src) / cnt
if len(src) % cnt > 0 {
ratio += 1
}
if ratio > 255 {
memPool.ReleaseByteSlice(dest)
return nil,fmt.Errorf("Impermissible errors")
}
dest[0] = uint8(ratio)
dest = dest[:cnt+1]
return
}
func (lc *Lz4Compressor) UncompressBlock(src []byte) (dest []byte, err error) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
err = errors.New("core dump info[" + errString + "]\n" + string(buf[:l]))
}
}()
radio := uint8(src[0])
if radio == 0 {
return nil,fmt.Errorf("Impermissible errors")
}
dest = memPool.MakeByteSlice(len(src)*int(radio))
cnt, err := lz4.UncompressBlock(src[1:], dest)
if err != nil {
memPool.ReleaseByteSlice(dest)
return nil,err
}
return dest[:cnt],nil
}
func (lc *Lz4Compressor) compressBlockBound(n int) int{
return lz4.CompressBlockBound(n)
}
func (lc *Lz4Compressor) CompressBufferCollection(buffer []byte){
memPool.ReleaseByteSlice(buffer)
}
func (lc *Lz4Compressor) UnCompressBufferCollection(buffer []byte) {
memPool.ReleaseByteSlice(buffer)
}
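
A minimal round-trip sketch of how this compressor could be exercised on its own, assuming the repository's `github.com/duanhf2012/origin/rpc` package and its `github.com/pierrec/lz4/v4` dependency are available; the payload below is illustrative.
```
package main

import (
	"bytes"
	"fmt"

	"github.com/duanhf2012/origin/rpc"
)

func main() {
	// A payload large and repetitive enough to be worth compressing.
	payload := bytes.Repeat([]byte("origin rpc payload "), 256)

	var c rpc.Lz4Compressor
	compressed, err := c.CompressBlock(payload)
	if err != nil {
		panic(err)
	}

	// The first byte carries the rounded-up compression ratio, which
	// UncompressBlock uses to size its destination buffer.
	restored, err := c.UncompressBlock(compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(payload), len(compressed), bytes.Equal(payload, restored))

	// Hand the pooled buffers back once they are no longer needed.
	c.CompressBufferCollection(compressed)
	c.UnCompressBufferCollection(restored)
}
```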

View File

@@ -41,7 +41,10 @@ func (slf *GoGoPBProcessor) Marshal(v interface{}) ([]byte, error){
}
func (slf *GoGoPBProcessor) Unmarshal(data []byte, msg interface{}) error{
protoMsg := msg.(proto.Message)
protoMsg,ok := msg.(proto.Message)
if ok == false {
return fmt.Errorf("%+v is not of proto.Message type",msg)
}
return proto.Unmarshal(data, protoMsg)
}

View File

@@ -7,6 +7,7 @@ import (
"reflect"
"strings"
"sync/atomic"
"time"
)
//Client for the local node
@@ -36,7 +37,7 @@ func (lc *LClient) SetConn(conn *network.TCPConn){
func (lc *LClient) Close(waitDone bool){
}
func (lc *LClient) Go(rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
func (lc *LClient) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//check whether the target is the same service
findIndex := strings.Index(serviceMethod, ".")
@@ -44,7 +45,8 @@ func (lc *LClient) Go(rpcHandler IRpcHandler,noReply bool, serviceMethod string,
sErr := errors.New("Call serviceMethod " + serviceMethod + " is error!")
log.SError(sErr.Error())
call := MakeCall()
call.Err = sErr
call.DoError(sErr)
return call
}
@@ -53,29 +55,31 @@ func (lc *LClient) Go(rpcHandler IRpcHandler,noReply bool, serviceMethod string,
//invoke this service's own rpcHandler
err := pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args, requestHandlerNull,reply)
call := MakeCall()
if err != nil {
call.Err = err
call.DoError(err)
return call
}
call.done<-call
call.DoOK()
return call
}
//dispatch to another rpcHandler
return pLocalRpcServer.selfNodeRpcHandlerGo(nil, lc.selfClient, noReply, serviceName, 0, serviceMethod, args, reply, nil)
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout,nil, lc.selfClient, noReply, serviceName, 0, serviceMethod, args, reply, nil)
}
func (rc *LClient) RawGo(rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceName string, rawArgs []byte, reply interface{}) *Call {
func (rc *LClient) RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceName string, rawArgs []byte, reply interface{}) *Call {
pLocalRpcServer := rpcHandler.GetRpcServer()()
call := MakeCall()
call.ServiceMethod = serviceName
call.Reply = reply
//the service calling itself
if serviceName == rpcHandler.GetName() {
call := MakeCall()
call.ServiceMethod = serviceName
call.Reply = reply
call.TimeOut = timeout
err := pLocalRpcServer.myselfRpcHandlerGo(rc.selfClient,serviceName, serviceName, rawArgs, requestHandlerNull,nil)
call.Err = err
call.done <- call
@@ -84,11 +88,11 @@ func (rc *LClient) RawGo(rpcHandler IRpcHandler,processor IRpcProcessor, noReply
}
//dispatch to another rpcHandler
return pLocalRpcServer.selfNodeRpcHandlerGo(processor,rc.selfClient, true, serviceName, rpcMethodId, serviceName, nil, nil, rawArgs)
return pLocalRpcServer.selfNodeRpcHandlerGo(timeout,processor,rc.selfClient, true, serviceName, rpcMethodId, serviceName, nil, nil, rawArgs)
}
func (lc *LClient) AsyncCall(rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, reply interface{}) error {
func (lc *LClient) AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, reply interface{},cancelable bool) (CancelRpc,error) {
pLocalRpcServer := rpcHandler.GetRpcServer()()
//判断是否是同一服务
@@ -97,29 +101,29 @@ func (lc *LClient) AsyncCall(rpcHandler IRpcHandler, serviceMethod string, callb
err := errors.New("Call serviceMethod " + serviceMethod + " is error!")
callback.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.SError(err.Error())
return nil
return emptyCancelRpc,nil
}
serviceName := serviceMethod[:findIndex]
//invoke this service's own rpcHandler
if serviceName == rpcHandler.GetName() { //自己服务调用
return pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args,callback ,reply)
return emptyCancelRpc,pLocalRpcServer.myselfRpcHandlerGo(lc.selfClient,serviceName, serviceMethod, args,callback ,reply)
}
//dispatch to another rpcHandler
err := pLocalRpcServer.selfNodeRpcHandlerAsyncGo(lc.selfClient, rpcHandler, false, serviceName, serviceMethod, args, reply, callback)
calcelRpc,err := pLocalRpcServer.selfNodeRpcHandlerAsyncGo(timeout,lc.selfClient, rpcHandler, false, serviceName, serviceMethod, args, reply, callback,cancelable)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
return nil
return calcelRpc,nil
}
func NewLClient(nodeId int) *Client{
client := &Client{}
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.nodeId = nodeId
client.maxCheckCallRpcCount = MaxCheckCallRpcCount
client.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
client.callRpcTimeout = DefaultRpcTimeout
lClient := &LClient{}

View File

@@ -9,10 +9,12 @@ import (
"reflect"
"runtime"
"sync/atomic"
"time"
)
//Client for a cross-node connection
type RClient struct {
compressBytesLen int
selfClient *Client
network.TCPClient
conn *network.TCPConn
@@ -40,24 +42,25 @@ func (rc *RClient) SetConn(conn *network.TCPConn){
rc.Unlock()
}
func (rc *RClient) Go(rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
func (rc *RClient) Go(timeout time.Duration,rpcHandler IRpcHandler,noReply bool, serviceMethod string, args interface{}, reply interface{}) *Call {
_, processor := GetProcessorType(args)
InParam, err := processor.Marshal(args)
if err != nil {
log.SError(err.Error())
call := MakeCall()
call.Err = err
call.DoError(err)
return call
}
return rc.RawGo(rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
return rc.RawGo(timeout,rpcHandler,processor, noReply, 0, serviceMethod, InParam, reply)
}
func (rc *RClient) RawGo(rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
func (rc *RClient) RawGo(timeout time.Duration,rpcHandler IRpcHandler,processor IRpcProcessor, noReply bool, rpcMethodId uint32, serviceMethod string, rawArgs []byte, reply interface{}) *Call {
call := MakeCall()
call.ServiceMethod = serviceMethod
call.Reply = reply
call.Seq = rc.selfClient.generateSeq()
call.TimeOut = timeout
request := MakeRpcRequest(processor, call.Seq, rpcMethodId, serviceMethod, noReply, rawArgs)
bytes, err := processor.Marshal(request.RpcRequestData)
@@ -65,46 +68,72 @@ func (rc *RClient) RawGo(rpcHandler IRpcHandler,processor IRpcProcessor, noReply
if err != nil {
call.Seq = 0
call.Err = err
log.SError(err.Error())
call.DoError(err)
return call
}
conn := rc.GetConn()
if conn == nil || conn.IsConnected()==false {
call.Seq = 0
call.Err = errors.New(serviceMethod + " was called failed,rpc client is disconnect")
sErr := errors.New(serviceMethod + " was called failed,rpc client is disconnect")
log.SError(sErr.Error())
call.DoError(sErr)
return call
}
var compressBuff[]byte
bCompress := uint8(0)
if rc.compressBytesLen > 0 && len(bytes) >= rc.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
call.Seq = 0
log.SError(cErr.Error())
call.DoError(cErr)
return call
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
if noReply == false {
rc.selfClient.AddPending(call)
}
err = conn.WriteMsg([]byte{uint8(processor.GetProcessorType())}, bytes)
err = conn.WriteMsg([]byte{uint8(processor.GetProcessorType())|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
rc.selfClient.RemovePending(call.Seq)
log.SError(err.Error())
call.Seq = 0
call.Err = err
call.DoError(err)
}
return call
}
func (rc *RClient) AsyncCall(rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) error {
err := rc.asyncCall(rpcHandler, serviceMethod, callback, args, replyParam)
func (rc *RClient) AsyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
cancelRpc,err := rc.asyncCall(timeout,rpcHandler, serviceMethod, callback, args, replyParam,cancelable)
if err != nil {
callback.Call([]reflect.Value{reflect.ValueOf(replyParam), reflect.ValueOf(err)})
}
return nil
return cancelRpc,nil
}
func (rc *RClient) asyncCall(rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{}) error {
func (rc *RClient) asyncCall(timeout time.Duration,rpcHandler IRpcHandler, serviceMethod string, callback reflect.Value, args interface{}, replyParam interface{},cancelable bool) (CancelRpc,error) {
processorType, processor := GetProcessorType(args)
InParam, herr := processor.Marshal(args)
if herr != nil {
return herr
return emptyCancelRpc,herr
}
seq := rc.selfClient.generateSeq()
@@ -112,12 +141,27 @@ func (rc *RClient) asyncCall(rpcHandler IRpcHandler, serviceMethod string, callb
bytes, err := processor.Marshal(request.RpcRequestData)
ReleaseRpcRequest(request)
if err != nil {
return err
return emptyCancelRpc,err
}
conn := rc.GetConn()
if conn == nil || conn.IsConnected()==false {
return errors.New("Rpc server is disconnect,call " + serviceMethod)
return emptyCancelRpc,errors.New("Rpc server is disconnect,call " + serviceMethod)
}
var compressBuff[]byte
bCompress := uint8(0)
if rc.compressBytesLen>0 &&len(bytes) >= rc.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
return emptyCancelRpc,cErr
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
call := MakeCall()
@@ -126,18 +170,28 @@ func (rc *RClient) asyncCall(rpcHandler IRpcHandler, serviceMethod string, callb
call.rpcHandler = rpcHandler
call.ServiceMethod = serviceMethod
call.Seq = seq
call.TimeOut = timeout
rc.selfClient.AddPending(call)
err = conn.WriteMsg([]byte{uint8(processorType)}, bytes)
err = conn.WriteMsg([]byte{uint8(processorType)|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if err != nil {
rc.selfClient.RemovePending(call.Seq)
ReleaseCall(call)
return err
return emptyCancelRpc,err
}
return nil
if cancelable {
rpcCancel := RpcCancel{CallSeq:seq,Cli: rc.selfClient}
return rpcCancel.CancelRpc,nil
}
return emptyCancelRpc,nil
}
func (rc *RClient) Run() {
defer func() {
if r := recover(); r != nil {
@@ -156,7 +210,8 @@ func (rc *RClient) Run() {
return
}
processor := GetProcessor(bytes[0])
bCompress := (bytes[0]>>7) > 0
processor := GetProcessor(bytes[0]&0x7f)
if processor == nil {
rc.conn.ReleaseReadMsg(bytes)
log.SError("rpcClient ", rc.Addr, " ReadMsg head error:", err.Error())
@@ -167,14 +222,33 @@ func (rc *RClient) Run() {
response := RpcResponse{}
response.RpcResponseData = processor.MakeRpcResponse(0, "", nil)
err = processor.Unmarshal(bytes[1:], response.RpcResponseData)
//decompress
byteData := bytes[1:]
var compressBuff []byte
if bCompress == true {
var unCompressErr error
compressBuff,unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr!= nil {
rc.conn.ReleaseReadMsg(bytes)
log.SError("rpcClient ", rc.Addr, " ReadMsg head error:", unCompressErr.Error())
return
}
byteData = compressBuff
}
err = processor.Unmarshal(byteData, response.RpcResponseData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
rc.conn.ReleaseReadMsg(bytes)
if err != nil {
processor.ReleaseRpcResponse(response.RpcResponseData)
log.SError("rpcClient Unmarshal head error:", err.Error())
continue
}
v := rc.selfClient.RemovePending(response.RpcResponseData.GetSeq())
if v == nil {
log.SError("rpcClient cannot find seq ", response.RpcResponseData.GetSeq(), " in pending")
@@ -207,23 +281,23 @@ func (rc *RClient) OnClose() {
rc.TriggerRpcConnEvent(false, rc.selfClient.GetClientId(), rc.selfClient.GetNodeId())
}
func NewRClient(nodeId int, addr string, maxRpcParamLen uint32,triggerRpcConnEvent TriggerRpcConnEvent) *Client{
func NewRClient(nodeId int, addr string, maxRpcParamLen uint32,compressBytesLen int,triggerRpcConnEvent TriggerRpcConnEvent) *Client{
client := &Client{}
client.clientId = atomic.AddUint32(&clientSeq, 1)
client.nodeId = nodeId
client.maxCheckCallRpcCount = MaxCheckCallRpcCount
client.maxCheckCallRpcCount = DefaultMaxCheckCallRpcCount
client.callRpcTimeout = DefaultRpcTimeout
c:= &RClient{}
c.compressBytesLen = compressBytesLen
c.selfClient = client
c.Addr = addr
c.ConnectInterval = ConnectInterval
c.PendingWriteNum = MaxPendingWriteNum
c.ConnectInterval = DefaultConnectInterval
c.PendingWriteNum = DefaultMaxPendingWriteNum
c.AutoReconnect = true
c.TriggerRpcConnEvent = triggerRpcConnEvent
c.ConnNum = RpcConnNum
c.LenMsgLen = RpcLenMsgLen
c.MinMsgLen = RpcMinMsgLen
c.ConnNum = DefaultRpcConnNum
c.LenMsgLen = DefaultRpcLenMsgLen
c.MinMsgLen = DefaultRpcMinMsgLen
c.ReadDeadline = Default_ReadWriteDeadline
c.WriteDeadline = Default_ReadWriteDeadline
c.LittleEndian = LittleEndian
@@ -244,18 +318,6 @@ func NewRClient(nodeId int, addr string, maxRpcParamLen uint32,triggerRpcConnEve
func (rc *RClient) Close(waitDone bool) {
rc.TCPClient.Close(waitDone)
rc.selfClient.pendingLock.Lock()
for {
pElem := rc.selfClient.pendingTimer.Front()
if pElem == nil {
break
}
pCall := pElem.Value.(*Call)
pCall.Err = errors.New("nodeid is disconnect ")
rc.selfClient.makeCallFail(pCall)
}
rc.selfClient.pendingLock.Unlock()
rc.selfClient.cleanPending()
}
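
Both the client and the server hunks pack the compression flag into the same header byte as the processor type: the high bit marks a compressed payload and the low seven bits keep the processor type (`uint8(processorType)|bCompress` on the write side, `bytes[0]>>7` and `bytes[0]&0x7f` on the read side). A small standalone sketch of that encoding, with an illustrative processor type value:
```
package main

import "fmt"

const compressedFlag = uint8(1 << 7) // high bit: payload is lz4-compressed

// encodeHead combines the processor type (low 7 bits) with the compression
// flag, mirroring []byte{uint8(processorType) | bCompress} in the diff.
func encodeHead(processorType uint8, compressed bool) uint8 {
	head := processorType & 0x7f
	if compressed {
		head |= compressedFlag
	}
	return head
}

// decodeHead mirrors the read side: head>>7 says whether to decompress,
// and head&0x7f selects the processor.
func decodeHead(head uint8) (processorType uint8, compressed bool) {
	return head & 0x7f, head>>7 > 0
}

func main() {
	head := encodeHead(1, true)
	pt, compressed := decodeHead(head)
	fmt.Printf("head=%08b processor=%d compressed=%v\n", head, pt, compressed)
}
```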

View File

@@ -68,7 +68,16 @@ type Call struct {
connId int
callback *reflect.Value
rpcHandler IRpcHandler
callTime time.Time
TimeOut time.Duration
}
type RpcCancel struct {
Cli *Client
CallSeq uint64
}
func (rc *RpcCancel) CancelRpc(){
rc.Cli.RemovePending(rc.CallSeq)
}
func (slf *RpcRequest) Clear() *RpcRequest{
@@ -102,6 +111,15 @@ func (rpcResponse *RpcResponse) Clear() *RpcResponse{
return rpcResponse
}
func (call *Call) DoError(err error){
call.Err = err
call.done <- call
}
func (call *Call) DoOK(){
call.done <- call
}
func (call *Call) Clear() *Call{
call.Seq = 0
call.ServiceMethod = ""
@@ -115,6 +133,8 @@ func (call *Call) Clear() *Call{
call.connId = 0
call.callback = nil
call.rpcHandler = nil
call.TimeOut = 0
return call
}

View File

@@ -9,6 +9,7 @@ import (
"strings"
"unicode"
"unicode/utf8"
"time"
)
const maxClusterNode int = 128
@@ -75,6 +76,9 @@ type IDiscoveryServiceListener interface {
OnUnDiscoveryService(nodeId int, serviceName []string)
}
type CancelRpc func()
func emptyCancelRpc(){}
type IRpcHandler interface {
IRpcHandlerChannel
GetName() string
@@ -83,11 +87,18 @@ type IRpcHandler interface {
HandlerRpcRequest(request *RpcRequest)
HandlerRpcResponseCB(call *Call)
CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error
AsyncCall(serviceMethod string, args interface{}, callback interface{}) error
Call(serviceMethod string, args interface{}, reply interface{}) error
Go(serviceMethod string, args interface{}) error
AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error
CallNode(nodeId int, serviceMethod string, args interface{}, reply interface{}) error
AsyncCall(serviceMethod string, args interface{}, callback interface{}) error
AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error
CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error
CallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, reply interface{}) error
AsyncCallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error)
AsyncCallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error)
Go(serviceMethod string, args interface{}) error
GoNode(nodeId int, serviceMethod string, args interface{}) error
RawGoNode(rpcProcessorType RpcProcessorType, nodeId int, rpcMethodId uint32, serviceName string, rawArgs []byte) error
CastGo(serviceMethod string, args interface{}) error
@@ -138,7 +149,7 @@ func (handler *RpcHandler) isExportedOrBuiltinType(t reflect.Type) bool {
func (handler *RpcHandler) suitableMethods(method reflect.Method) error {
//only methods whose names start with RPC_ can be called
if strings.Index(method.Name, "RPC_") != 0 {
if strings.Index(method.Name, "RPC_") != 0 && strings.Index(method.Name, "RPC") != 0 {
return nil
}
@@ -291,14 +302,16 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
request.requestHandle(nil, RpcError(rErr))
return
}
requestHanle := request.requestHandle
returnValues := v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()
if errInter != nil {
err = errInter.(error)
}
if request.requestHandle != nil && v.hasResponder == false {
request.requestHandle(oParam.Interface(), ConvertError(err))
if v.hasResponder == false && requestHanle != nil {
requestHanle(oParam.Interface(), ConvertError(err))
}
}
@@ -321,7 +334,8 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
pCall.callback = &callBack
pCall.Seq = client.generateSeq()
callSeq = pCall.Seq
pCall.TimeOut = DefaultRpcTimeout
pCall.ServiceMethod = ServiceMethod
client.AddPending(pCall)
//when there is a return value
@@ -431,7 +445,7 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int
//2. Call via rpcClient
for i := 0; i < count; i++ {
pCall := pClientList[i].Go(handler.rpcHandler,true, serviceMethod, args, nil)
pCall := pClientList[i].Go(DefaultRpcTimeout,handler.rpcHandler,true, serviceMethod, args, nil)
if pCall.Err != nil {
err = pCall.Err
}
@@ -442,7 +456,7 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int
return err
}
func (handler *RpcHandler) callRpc(nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
func (handler *RpcHandler) callRpc(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
if err != nil {
@@ -458,12 +472,7 @@ func (handler *RpcHandler) callRpc(nodeId int, serviceMethod string, args interf
}
pClient := pClientList[0]
pCall := pClient.Go(handler.rpcHandler,false, serviceMethod, args, reply)
if pCall.Err != nil {
err = pCall.Err
ReleaseCall(pCall)
return err
}
pCall := pClient.Go(timeout,handler.rpcHandler,false, serviceMethod, args, reply)
err = pCall.Done().Err
pClient.RemovePending(pCall.Seq)
@@ -471,24 +480,24 @@ func (handler *RpcHandler) callRpc(nodeId int, serviceMethod string, args interf
return err
}
func (handler *RpcHandler) asyncCallRpc(nodeId int, serviceMethod string, args interface{}, callback interface{}) error {
func (handler *RpcHandler) asyncCallRpc(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error) {
fVal := reflect.ValueOf(callback)
if fVal.Kind() != reflect.Func {
err := errors.New("call " + serviceMethod + " input callback param is error!")
log.SError(err.Error())
return err
return emptyCancelRpc,err
}
if fVal.Type().NumIn() != 2 {
err := errors.New("call " + serviceMethod + " callback param function is error!")
log.SError(err.Error())
return err
return emptyCancelRpc,err
}
if fVal.Type().In(0).Kind() != reflect.Ptr || fVal.Type().In(1).String() != "error" {
err := errors.New("call " + serviceMethod + " callback param function is error!")
log.SError(err.Error())
return err
return emptyCancelRpc,err
}
reply := reflect.New(fVal.Type().In(0).Elem()).Interface()
@@ -496,27 +505,27 @@ func (handler *RpcHandler) asyncCallRpc(nodeId int, serviceMethod string, args i
err, count := handler.funcRpcClient(nodeId, serviceMethod, pClientList[:])
if count == 0 || err != nil {
if err == nil {
err = fmt.Errorf("cannot find %s from nodeId %d",serviceMethod,nodeId)
if nodeId > 0 {
err = fmt.Errorf("cannot find %s from nodeId %d",serviceMethod,nodeId)
}else {
err = fmt.Errorf("No %s service found in the origin network",serviceMethod)
}
}
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.SError("Call serviceMethod is error:", err.Error())
return nil
return emptyCancelRpc,nil
}
if count > 1 {
err := errors.New("cannot call more then 1 node")
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.SError(err.Error())
return nil
return emptyCancelRpc,nil
}
//2. Call via rpcClient
//if calling a service on the local node
pClient := pClientList[0]
pClient.AsyncCall(handler.rpcHandler, serviceMethod, fVal, args, reply)
return nil
return pClientList[0].AsyncCall(timeout,handler.rpcHandler, serviceMethod, fVal, args, reply,false)
}
func (handler *RpcHandler) GetName() string {
@@ -527,12 +536,29 @@ func (handler *RpcHandler) IsSingleCoroutine() bool {
return handler.rpcHandler.IsSingleCoroutine()
}
func (handler *RpcHandler) CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(timeout,0, serviceMethod, args, reply)
}
func (handler *RpcHandler) CallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, reply interface{}) error{
return handler.callRpc(timeout,nodeId, serviceMethod, args, reply)
}
func (handler *RpcHandler) AsyncCallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error){
return handler.asyncCallRpc(timeout,0, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCallNodeWithTimeout(timeout time.Duration,nodeId int, serviceMethod string, args interface{}, callback interface{}) (CancelRpc,error){
return handler.asyncCallRpc(timeout,nodeId, serviceMethod, args, callback)
}
func (handler *RpcHandler) AsyncCall(serviceMethod string, args interface{}, callback interface{}) error {
return handler.asyncCallRpc(0, serviceMethod, args, callback)
_,err := handler.asyncCallRpc(DefaultRpcTimeout,0, serviceMethod, args, callback)
return err
}
func (handler *RpcHandler) Call(serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(0, serviceMethod, args, reply)
return handler.callRpc(DefaultRpcTimeout,0, serviceMethod, args, reply)
}
func (handler *RpcHandler) Go(serviceMethod string, args interface{}) error {
@@ -540,11 +566,13 @@ func (handler *RpcHandler) Go(serviceMethod string, args interface{}) error {
}
func (handler *RpcHandler) AsyncCallNode(nodeId int, serviceMethod string, args interface{}, callback interface{}) error {
return handler.asyncCallRpc(nodeId, serviceMethod, args, callback)
_,err:= handler.asyncCallRpc(DefaultRpcTimeout,nodeId, serviceMethod, args, callback)
return err
}
func (handler *RpcHandler) CallNode(nodeId int, serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(nodeId, serviceMethod, args, reply)
return handler.callRpc(DefaultRpcTimeout,nodeId, serviceMethod, args, reply)
}
func (handler *RpcHandler) GoNode(nodeId int, serviceMethod string, args interface{}) error {
@@ -572,7 +600,7 @@ func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId i
//if calling a service on the local node
for i := 0; i < count; i++ {
//cross-node call
pCall := handler.pClientList[i].RawGo(handler.rpcHandler,processor, true, rpcMethodId, serviceName, rawArgs, nil)
pCall := handler.pClientList[i].RawGo(DefaultRpcTimeout,handler.rpcHandler,processor, true, rpcMethodId, serviceName, rawArgs, nil)
if pCall.Err != nil {
err = pCall.Err
}

89
rpc/rpctimer.go Normal file
View File

@@ -0,0 +1,89 @@
package rpc
import (
"container/heap"
"time"
)
type CallTimer struct {
SeqId uint64
FireTime int64
}
type CallTimerHeap struct {
callTimer []CallTimer
mapSeqIndex map[uint64]int
}
func (h *CallTimerHeap) Init() {
h.mapSeqIndex = make(map[uint64]int, 4096)
h.callTimer = make([]CallTimer, 0, 4096)
}
func (h *CallTimerHeap) Len() int {
return len(h.callTimer)
}
func (h *CallTimerHeap) Less(i, j int) bool {
return h.callTimer[i].FireTime < h.callTimer[j].FireTime
}
func (h *CallTimerHeap) Swap(i, j int) {
h.callTimer[i], h.callTimer[j] = h.callTimer[j], h.callTimer[i]
h.mapSeqIndex[h.callTimer[i].SeqId] = i
h.mapSeqIndex[h.callTimer[j].SeqId] = j
}
func (h *CallTimerHeap) Push(t any) {
callTimer := t.(CallTimer)
h.mapSeqIndex[callTimer.SeqId] = len(h.callTimer)
h.callTimer = append(h.callTimer, callTimer)
}
func (h *CallTimerHeap) Pop() any {
l := len(h.callTimer)
seqId := h.callTimer[l-1].SeqId
h.callTimer = h.callTimer[:l-1]
delete(h.mapSeqIndex, seqId)
return seqId
}
func (h *CallTimerHeap) Cancel(seq uint64) bool {
index, ok := h.mapSeqIndex[seq]
if ok == false {
return false
}
heap.Remove(h, index)
return true
}
func (h *CallTimerHeap) AddTimer(seqId uint64,d time.Duration){
heap.Push(h, CallTimer{
SeqId: seqId,
FireTime: time.Now().Add(d).UnixNano(),
})
}
func (h *CallTimerHeap) PopTimeout() uint64 {
if h.Len() == 0 {
return 0
}
nextFireTime := h.callTimer[0].FireTime
if nextFireTime > time.Now().UnixNano() {
return 0
}
return heap.Pop(h).(uint64)
}
func (h *CallTimerHeap) PopFirst() uint64 {
if h.Len() == 0 {
return 0
}
return heap.Pop(h).(uint64)
}
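
A minimal sketch of how the new timer heap might be driven on its own: register a few pending call sequence ids with different timeouts, cancel one, then poll for expirations. The ids and durations are illustrative; inside the framework this work is done by `AddPending`, `checkRpcCallTimeout`, and `cleanPending`.
```
package main

import (
	"fmt"
	"time"

	"github.com/duanhf2012/origin/rpc"
)

func main() {
	var h rpc.CallTimerHeap
	h.Init()

	// Register three pending calls with different timeouts.
	h.AddTimer(1, 10*time.Millisecond)
	h.AddTimer(2, 50*time.Millisecond)
	h.AddTimer(3, time.Second)

	// A call that completed normally is cancelled so it never fires.
	h.Cancel(2)

	time.Sleep(20 * time.Millisecond)

	// PopTimeout returns expired sequence ids, or 0 when nothing is due yet.
	for seq := h.PopTimeout(); seq != 0; seq = h.PopTimeout() {
		fmt.Println("timed out:", seq) // only seq 1 has expired by now
	}

	// On disconnect, PopFirst drains whatever is still pending.
	for seq := h.PopFirst(); seq != 0; seq = h.PopFirst() {
		fmt.Println("still pending:", seq) // seq 3
	}
}
```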

View File

@@ -27,6 +27,8 @@ type Server struct {
functions map[interface{}]interface{}
rpcHandleFinder RpcHandleFinder
rpcServer *network.TCPServer
compressBytesLen int
}
type RpcAgent struct {
@@ -64,15 +66,15 @@ func (server *Server) Init(rpcHandleFinder RpcHandleFinder) {
const Default_ReadWriteDeadline = 15*time.Second
func (server *Server) Start(listenAddr string, maxRpcParamLen uint32) {
func (server *Server) Start(listenAddr string, maxRpcParamLen uint32,compressBytesLen int) {
splitAddr := strings.Split(listenAddr, ":")
if len(splitAddr) != 2 {
log.SFatal("listen addr is error :", listenAddr)
}
server.rpcServer.Addr = ":" + splitAddr[1]
server.rpcServer.LenMsgLen = 4 //uint16
server.rpcServer.MinMsgLen = 2
server.compressBytesLen = compressBytesLen
if maxRpcParamLen > 0 {
server.rpcServer.MaxMsgLen = maxRpcParamLen
} else {
@@ -85,6 +87,8 @@ func (server *Server) Start(listenAddr string, maxRpcParamLen uint32) {
server.rpcServer.LittleEndian = LittleEndian
server.rpcServer.WriteDeadline = Default_ReadWriteDeadline
server.rpcServer.ReadDeadline = Default_ReadWriteDeadline
server.rpcServer.LenMsgLen = DefaultRpcLenMsgLen
server.rpcServer.Start()
}
@@ -111,7 +115,26 @@ func (agent *RpcAgent) WriteResponse(processor IRpcProcessor, serviceMethod stri
return
}
errM = agent.conn.WriteMsg([]byte{uint8(processor.GetProcessorType())}, bytes)
var compressBuff []byte
bCompress := uint8(0)
if agent.rpcServer.compressBytesLen >0 && len(bytes) >= agent.rpcServer.compressBytesLen {
var cErr error
compressBuff,cErr = compressor.CompressBlock(bytes)
if cErr != nil {
log.SError("service method ", serviceMethod, " CompressBlock error:", cErr.Error())
return
}
if len(compressBuff) < len(bytes) {
bytes = compressBuff
bCompress = 1<<7
}
}
errM = agent.conn.WriteMsg([]byte{uint8(processor.GetProcessorType())|bCompress}, bytes)
if cap(compressBuff) >0 {
compressor.CompressBufferCollection(compressBuff)
}
if errM != nil {
log.SError("Rpc ", serviceMethod, " return is error:", errM.Error())
}
@@ -126,16 +149,34 @@ func (agent *RpcAgent) Run() {
break
}
processor := GetProcessor(data[0])
bCompress := (data[0]>>7) > 0
processor := GetProcessor(data[0]&0x7f)
if processor == nil {
agent.conn.ReleaseReadMsg(data)
log.SError("remote rpc ", agent.conn.RemoteAddr(), " cannot find processor:", data[0])
log.SError("remote rpc ", agent.conn.RemoteAddr().String(), " cannot find processor:", data[0])
return
}
//parse the header
var compressBuff []byte
byteData := data[1:]
if bCompress == true {
var unCompressErr error
compressBuff,unCompressErr = compressor.UncompressBlock(byteData)
if unCompressErr != nil {
agent.conn.ReleaseReadMsg(data)
log.SError("rpcClient ", agent.conn.RemoteAddr().String(), " ReadMsg head error:", unCompressErr.Error())
return
}
byteData = compressBuff
}
req := MakeRpcRequest(processor, 0, 0, "", false, nil)
err = processor.Unmarshal(data[1:], req.RpcRequestData)
err = processor.Unmarshal(byteData, req.RpcRequestData)
if cap(compressBuff) > 0 {
compressor.UnCompressBufferCollection(compressBuff)
}
agent.conn.ReleaseReadMsg(data)
if err != nil {
log.SError("rpc Unmarshal request is error:", err.Error())
@@ -147,7 +188,6 @@ func (agent *RpcAgent) Run() {
ReleaseRpcRequest(req)
continue
} else {
//will close tcpconn
ReleaseRpcRequest(req)
break
}
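The compression marker reuses the single processor-type byte that prefixes every message: the low 7 bits keep the processor type and the high bit (1<<7) flags a compressed body, which is why the writer ORs in bCompress and the reader masks with 0x7f. Compression is only attempted once the payload reaches compressBytesLen, and only kept when it actually shrinks the data. A standalone round-trip sketch of that bit layout (illustrative, outside the rpc package):

package main

import "fmt"

const compressFlag = uint8(1) << 7 // high bit marks a compressed payload

// encodeType packs the processor type and the compression flag into one byte.
func encodeType(processorType uint8, compressed bool) uint8 {
	b := processorType & 0x7f
	if compressed {
		b |= compressFlag
	}
	return b
}

// decodeType mirrors data[0]>>7 and data[0]&0x7f in RpcAgent.Run above.
func decodeType(b uint8) (processorType uint8, compressed bool) {
	return b & 0x7f, b>>7 > 0
}

func main() {
	b := encodeType(1, true)
	pt, c := decodeType(b)
	fmt.Println(pt, c) // 1 true
}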
@@ -248,18 +288,18 @@ func (server *Server) myselfRpcHandlerGo(client *Client,handlerName string, serv
return rpcHandler.CallMethod(client,serviceMethod, args,callBack, reply)
}
func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
func (server *Server) selfNodeRpcHandlerGo(timeout time.Duration,processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
pCall := MakeCall()
pCall.Seq = client.generateSeq()
pCall.TimeOut = timeout
pCall.ServiceMethod = serviceMethod
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.SError(err.Error())
pCall.Seq = 0
pCall.Err = errors.New("service method " + serviceMethod + " not config!")
pCall.done <- pCall
log.SError(pCall.Err.Error())
pCall.DoError(err)
return pCall
}
@@ -273,10 +313,10 @@ func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Clie
var err error
iParam,err = processor.Clone(args)
if err != nil {
sErr := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.SError(sErr.Error())
pCall.Seq = 0
pCall.Err = errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
pCall.done <- pCall
log.SError(pCall.Err.Error())
pCall.DoError(sErr)
return pCall
}
@@ -289,9 +329,10 @@ func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Clie
var err error
req.inParam, err = rpcHandler.UnmarshalInParam(processor, serviceMethod, rpcMethodId, rawArgs)
if err != nil {
log.SError(err.Error())
pCall.Seq = 0
pCall.DoError(err)
ReleaseRpcRequest(req)
pCall.Err = err
pCall.done <- pCall
return pCall
}
}
@@ -303,20 +344,83 @@ func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Clie
if reply != nil && Returns != reply && Returns != nil {
byteReturns, err := req.rpcProcessor.Marshal(Returns)
if err != nil {
log.SError("returns data cannot be marshal ", callSeq)
ReleaseRpcRequest(req)
}
err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
log.SError("returns data cannot be Unmarshal ", callSeq)
ReleaseRpcRequest(req)
Err = ConvertError(err)
log.SError("returns data cannot be marshal,callSeq is ", callSeq," error is ",err.Error())
} else {
err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
Err = ConvertError(err)
log.SError("returns data cannot be Unmarshal,callSeq is ", callSeq," error is ",err.Error())
}
}
}
ReleaseRpcRequest(req)
v := client.RemovePending(callSeq)
if v == nil {
log.SError("rpcClient cannot find seq ",callSeq, " in pending")
return
}
if len(Err) == 0 {
v.Err = nil
v.DoOK()
} else {
log.SError(Err.Error())
v.DoError(Err)
}
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
log.SError(err.Error())
pCall.DoError(err)
ReleaseRpcRequest(req)
}
return pCall
}
func (server *Server) selfNodeRpcHandlerAsyncGo(timeout time.Duration,client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value,cancelable bool) (CancelRpc,error) {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.SError(err.Error())
return emptyCancelRpc,err
}
_, processor := GetProcessorType(args)
iParam,err := processor.Clone(args)
if err != nil {
errM := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.SError(errM.Error())
return emptyCancelRpc,errM
}
req := MakeRpcRequest(processor, 0, 0, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
cancelRpc := emptyCancelRpc
var callSeq uint64
if noReply == false {
callSeq = client.generateSeq()
pCall := MakeCall()
pCall.Seq = callSeq
pCall.rpcHandler = callerRpcHandler
pCall.callback = &callback
pCall.Reply = reply
pCall.ServiceMethod = serviceMethod
pCall.TimeOut = timeout
client.AddPending(pCall)
rpcCancel := RpcCancel{CallSeq: callSeq,Cli: client}
cancelRpc = rpcCancel.CancelRpc
req.requestHandle = func(Returns interface{}, Err RpcError) {
v := client.RemovePending(callSeq)
if v == nil {
ReleaseRpcRequest(req)
return
}
@@ -325,68 +429,11 @@ func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Clie
} else {
v.Err = Err
}
v.done <- v
ReleaseRpcRequest(req)
}
}
err := rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
pCall.Err = err
pCall.done <- pCall
}
return pCall
}
func (server *Server) selfNodeRpcHandlerAsyncGo(client *Client, callerRpcHandler IRpcHandler, noReply bool, handlerName string, serviceMethod string, args interface{}, reply interface{}, callback reflect.Value) error {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
log.SError(err.Error())
return err
}
_, processor := GetProcessorType(args)
iParam,err := processor.Clone(args)
if err != nil {
errM := errors.New("RpcHandler " + handlerName + "."+serviceMethod+" deep copy inParam is error:" + err.Error())
log.SError(errM.Error())
return errM
}
req := MakeRpcRequest(processor, 0, 0, serviceMethod, noReply, nil)
req.inParam = iParam
req.localReply = reply
if noReply == false {
callSeq := client.generateSeq()
pCall := MakeCall()
pCall.Seq = callSeq
pCall.rpcHandler = callerRpcHandler
pCall.callback = &callback
pCall.Reply = reply
pCall.ServiceMethod = serviceMethod
client.AddPending(pCall)
req.requestHandle = func(Returns interface{}, Err RpcError) {
v := client.RemovePending(callSeq)
if v == nil {
log.SError("rpcClient cannot find seq ", pCall.Seq, " in pending")
//ReleaseCall(pCall)
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
pCall.Err = nil
} else {
pCall.Err = Err
}
if Returns != nil {
pCall.Reply = Returns
v.Reply = Returns
}
pCall.rpcHandler.PushRpcResponse(pCall)
v.rpcHandler.PushRpcResponse(v)
ReleaseRpcRequest(req)
}
}
@@ -394,8 +441,11 @@ func (server *Server) selfNodeRpcHandlerAsyncGo(client *Client, callerRpcHandler
err = rpcHandler.PushRpcRequest(req)
if err != nil {
ReleaseRpcRequest(req)
return err
if callSeq > 0 {
client.RemovePending(callSeq)
}
return emptyCancelRpc,err
}
return nil
return cancelRpc,nil
}
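The cancellation hook returned above follows a simple pattern: bind the pending sequence id and client into a small struct (RpcCancel{CallSeq, Cli}) and hand back its method value, so a caller can abandon a call without touching the pending map directly. A self-contained sketch of that pattern with illustrative names (not origin's actual types):

package main

import (
	"fmt"
	"sync"
)

// pendingTable stands in for the client's pending-call map.
type pendingTable struct {
	mu    sync.Mutex
	calls map[uint64]string
}

func (p *pendingTable) remove(seq uint64) {
	p.mu.Lock()
	delete(p.calls, seq)
	p.mu.Unlock()
}

// rpcCancel mirrors the shape of RpcCancel: just enough captured state
// to drop one pending call later.
type rpcCancel struct {
	seq uint64
	tab *pendingTable
}

func (c rpcCancel) Cancel() { c.tab.remove(c.seq) }

func main() {
	tab := &pendingTable{calls: map[uint64]string{42: "TestService2.RPC_Query"}}
	cancel := rpcCancel{seq: 42, tab: tab}.Cancel // method value, like rpcCancel.CancelRpc above
	cancel()
	fmt.Println(len(tab.calls)) // 0
}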

View File

@@ -20,6 +20,7 @@ var timerDispatcherLen = 100000
var maxServiceEventChannelNum = 2000000
type IService interface {
concurrent.IConcurrent
Init(iService IService,getClientFun rpc.FuncRpcClient,getServerFun rpc.FuncRpcServer,serviceCfg interface{})
Stop()
Start()

View File

@@ -213,7 +213,7 @@ func (cs *CustomerSubscriber) publishToCustomer(topicData []TopicData) bool {
}
//push the data
err := cs.CallNode(cs.fromNodeId, cs.callBackRpcMethod, &dbQueuePublishReq, &dbQueuePushRes)
err := cs.CallNodeWithTimeout(4*time.Minute,cs.fromNodeId, cs.callBackRpcMethod, &dbQueuePublishReq, &dbQueuePushRes)
if err != nil {
time.Sleep(time.Second * 1)
continue

View File

@@ -1,18 +1,49 @@
package messagequeueservice
import (
"errors"
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"sunserver/common/util"
"time"
)
const MaxDays = 180
type DataType interface {
int | uint | int64 | uint64 | float32 | float64 | int32 | uint32 | int16 | uint16
}
func convertToNumber[DType DataType](val interface{}) (error, DType) {
switch val.(type) {
case int64:
return nil, DType(val.(int64))
case int:
return nil, DType(val.(int))
case uint:
return nil, DType(val.(uint))
case uint64:
return nil, DType(val.(uint64))
case float32:
return nil, DType(val.(float32))
case float64:
return nil, DType(val.(float64))
case int32:
return nil, DType(val.(int32))
case uint32:
return nil, DType(val.(uint32))
case int16:
return nil, DType(val.(int16))
case uint16:
return nil, DType(val.(uint16))
}
return errors.New("unsupported type"), 0
}
type MongoPersist struct {
service.Module
mongo mongodbmodule.MongoModule
@@ -363,7 +394,7 @@ func (mp *MongoPersist) GetIndex(topicData *TopicData) uint64 {
for _, e := range document {
if e.Key == "_id" {
errC, seq := util.ConvertToNumber[uint64](e.Value)
errC, seq := convertToNumber[uint64](e.Value)
if errC != nil {
log.Error("value is error:%s,%+v, ", errC.Error(), e.Value)
}
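convertToNumber is a local generic replacement for the removed sunserver util.ConvertToNumber: it narrows an interface{} holding any of the listed numeric types into the requested DType, returning (error, value) in that order. A small usage sketch, assumed to live in the same package (Mongo may hand back "_id" as int32, int64, or float64 depending on how the document was written):

func exampleSeq(raw interface{}) uint64 {
	err, seq := convertToNumber[uint64](raw)
	if err != nil {
		return 0 // unsupported dynamic type
	}
	return seq
}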

View File

@@ -6,9 +6,9 @@ import (
"github.com/duanhf2012/origin/rpc"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
"github.com/duanhf2012/origin/util/coroutine"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"runtime"
"sync"
"sync/atomic"
"time"
@@ -71,7 +71,9 @@ func (mp *MongoPersist) OnInit() error {
}
//start the persist goroutine
coroutine.GoRecover(mp.persistCoroutine,-1)
mp.waitGroup.Add(1)
go mp.persistCoroutine()
return nil
}
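This hunk swaps coroutine.GoRecover for a plain goroutine and, importantly, calls waitGroup.Add(1) before the go statement; the matching removal inside persistCoroutine appears in the next hunk. Add must happen before any possible Wait, otherwise a fast shutdown can see a zero counter before the worker has registered itself. A standalone illustration of the corrected ordering:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Correct: register the worker before it starts, as OnInit now does.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("persist loop running")
	}()

	// Previous shape: wg.Add(1) inside the goroutine can race with this
	// Wait and let the caller return before the worker is counted.
	wg.Wait()
}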
@@ -260,7 +262,6 @@ func (mp *MongoPersist) JugeTimeoutSave() bool{
}
func (mp *MongoPersist) persistCoroutine(){
mp.waitGroup.Add(1)
defer mp.waitGroup.Done()
for atomic.LoadInt32(&mp.stop)==0 || mp.hasPersistData(){
//sleep for the configured interval
@@ -291,6 +292,15 @@ func (mp *MongoPersist) hasPersistData() bool{
}
func (mp *MongoPersist) saveToDB(){
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
l := runtime.Stack(buf, false)
errString := fmt.Sprint(r)
log.SError(" Core dump info[", errString, "]\n", string(buf[:l]))
}
}()
//1. copy the data
mp.Lock()
mapRemoveRankData := mp.mapRemoveRankData

View File

@@ -356,12 +356,12 @@ func (rs *RankSkip) GetRankNodeDataByRank(rank uint64) (*RankData, uint64) {
// GetRankKeyPrevToLimit returns the count entries ranked before findKey
func (rs *RankSkip) GetRankKeyPrevToLimit(findKey, count uint64, result *rpc.RankDataList) error {
if rs.GetRankLen() <= 0 {
return fmt.Errorf("rank[", rs.rankId, "] no data")
return fmt.Errorf("rank[%d] no data", rs.rankId)
}
findData, ok := rs.mapRankData[findKey]
if ok == false {
return fmt.Errorf("rank[", rs.rankId, "] no data")
return fmt.Errorf("rank[%d] no data", rs.rankId)
}
_, rankPos := rs.skipList.GetWithPosition(findData)
@@ -385,12 +385,12 @@ func (rs *RankSkip) GetRankKeyPrevToLimit(findKey, count uint64, result *rpc.Ran
// GetRankKeyNextToLimit returns the count entries ranked after findKey
func (rs *RankSkip) GetRankKeyNextToLimit(findKey, count uint64, result *rpc.RankDataList) error {
if rs.GetRankLen() <= 0 {
return fmt.Errorf("rank[", rs.rankId, "] no data")
return fmt.Errorf("rank[%d] no data", rs.rankId)
}
findData, ok := rs.mapRankData[findKey]
if ok == false {
return fmt.Errorf("rank[", rs.rankId, "] no data")
return fmt.Errorf("rank[%d] no data", rs.rankId)
}
_, rankPos := rs.skipList.GetWithPosition(findData)
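The old calls passed rs.rankId as an extra argument without a format verb, so the error text came out mangled; the fix threads the id through %d. For reference (illustrative):

package main

import "fmt"

func main() {
	// Old form: no verb, extra operands get dumped into the message.
	fmt.Println(fmt.Errorf("rank[", 1, "] no data"))
	// Prints: rank[%!(EXTRA int=1, string=] no data)

	// Fixed form from this commit:
	fmt.Println(fmt.Errorf("rank[%d] no data", 1))
	// Prints: rank[1] no data
}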

View File

@@ -90,6 +90,10 @@ func (tcpService *TcpService) OnInit() error{
if ok == true {
tcpService.tcpServer.LittleEndian = LittleEndian.(bool)
}
LenMsgLen,ok := tcpCfg["LenMsgLen"]
if ok == true {
tcpService.tcpServer.LenMsgLen = int(LenMsgLen.(float64))
}
MinMsgLen,ok := tcpCfg["MinMsgLen"]
if ok == true {
tcpService.tcpServer.MinMsgLen = uint32(MinMsgLen.(float64))

View File

@@ -7,6 +7,7 @@ import (
"reflect"
"runtime"
"time"
"sync/atomic"
)
// ITimer
@@ -29,7 +30,7 @@ type OnAddTimer func(timer ITimer)
// Timer
type Timer struct {
Id uint64
cancelled bool //whether the timer has been cancelled
cancelled int32 //whether the timer has been cancelled
C chan ITimer //timer channel
interval time.Duration // interval (used by repeating timers)
fireTime time.Time // fire time
@@ -171,12 +172,12 @@ func (t *Timer) GetInterval() time.Duration {
}
func (t *Timer) Cancel() {
t.cancelled = true
atomic.StoreInt32(&t.cancelled,1)
}
// IsActive reports whether the timer has not been cancelled
func (t *Timer) IsActive() bool {
return !t.cancelled
return atomic.LoadInt32(&t.cancelled) == 0
}
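Switching cancelled from bool to int32 with atomic Store/Load makes Cancel safe to call from a different goroutine than the one polling IsActive; an unsynchronized bool write/read is a data race under the Go memory model. A standalone sketch of the same idiom:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type flag struct{ cancelled int32 }

func (f *flag) Cancel()        { atomic.StoreInt32(&f.cancelled, 1) }
func (f *flag) IsActive() bool { return atomic.LoadInt32(&f.cancelled) == 0 }

func main() {
	var f flag
	go func() {
		time.Sleep(10 * time.Millisecond)
		f.Cancel() // may run on another goroutine, as a timer Cancel does
	}()
	for f.IsActive() {
		time.Sleep(time.Millisecond)
	}
	fmt.Println("cancelled observed safely")
}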
func (t *Timer) GetName() string {