Compare commits

...

22 Commits

Author SHA1 Message Date
boyce
d225bb4bd2 Improve the default value of the logchannelcap startup parameter 2024-06-25 09:20:03 +08:00
boyce
ea37fb5081 Add SetSkip/GetSkip log interfaces 2024-06-24 15:29:54 +08:00
boyce
0a92f48d0b Improve node status handling 2024-06-20 09:10:18 +08:00
boyce
f5e86fee02 Add Stop interface to close the mongo connection 2024-06-19 15:51:31 +08:00
boyce
166facc959 Improve the mongodb driver 2024-06-18 10:07:54 +08:00
boyce
5bb747201b Extend the readme documentation 2024-06-14 17:46:58 +08:00
boyce
1014bc54e4 Improve the network processors 2024-06-14 16:21:52 +08:00
boyce
9c26c742fe Improve goroutine pool shutdown 2024-06-14 15:42:23 +08:00
boyce
d1935b1bbc Improve logging 2024-05-27 18:09:37 +08:00
boyce
90d54bf3e2 1. Add service and global configuration interfaces with struct-based parsing
2. Improve logging
2024-05-15 10:06:50 +08:00
boyce
78cc33c84e Improve configuration loading for service templates 2024-05-11 15:11:24 +08:00
boyce
9cf21bf418 1. Add template services
2. Improve the message queue
3. Improve related logging
2024-05-11 14:42:34 +08:00
boyce
c6d0bd9a19 Improve the gin module 2024-05-08 14:18:39 +08:00
boyce
61bf95e457 Improve RankService 2024-05-07 18:57:38 +08:00
boyce
8b2a551ee5 Improve the gin module 2024-05-06 10:51:51 +08:00
boyce
927c2ffa37 Improve the gin module 2024-05-06 10:36:04 +08:00
boyce
b23b30aac5 Improve service discovery 2024-04-30 19:05:44 +08:00
boyce
03f8ba0316 Simplify event notifications 2024-04-30 17:33:34 +08:00
boyce
277480a7f0 Merge branch 'v2' of https://github.com/duanhf2012/origin into v2 2024-04-29 09:24:04 +08:00
boyce
647a654a36 Extend the documentation of the improvements 2024-04-29 09:23:54 +08:00
boyce
de483a88f1 Improve server shutdown 2024-04-28 18:02:20 +08:00
boyce
bbbb511b5f Add kafka module 2024-04-28 11:21:59 +08:00
34 changed files with 1620 additions and 647 deletions

README.md (821 changes)

File diff suppressed because it is too large.

View File

@@ -8,6 +8,8 @@ import (
"strings"
"sync"
"github.com/duanhf2012/origin/v2/event"
"errors"
"reflect"
)
var configDir = "./config/"
@@ -54,7 +56,6 @@ type Cluster struct {
discoveryInfo DiscoveryInfo //service discovery configuration
rpcMode RpcMode
//masterDiscoveryNodeList []NodeInfo //nodes configured for Master discovery
globalCfg interface{} //global configuration
localServiceCfg map[string]interface{} //map[serviceName]configuration data
@@ -63,6 +64,7 @@ type Cluster struct {
locker sync.RWMutex //lock protecting node-service relationships
mapRpc map[string]*NodeRpcInfo //nodeId
mapServiceNode map[string]map[string]struct{} //map[serviceName]map[NodeId]
mapTemplateServiceNode map[string]map[string]struct{} //map[templateServiceName]map[serviceName]nodeId
callSet rpc.CallSet
rpcNats rpc.RpcNats
@@ -70,7 +72,6 @@ type Cluster struct {
rpcEventLocker sync.RWMutex //lock protecting RPC event listeners
mapServiceListenRpcEvent map[string]struct{} //ServiceName
mapServiceListenDiscoveryEvent map[string]struct{} //ServiceName
}
func GetCluster() *Cluster {
@@ -139,6 +140,20 @@ func (cls *Cluster) delServiceNode(serviceName string, nodeId string) {
return
}
//handle template services
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
mapService := cls.mapTemplateServiceNode[templateServiceName]
delete(mapService,serviceName)
if len(cls.mapTemplateServiceNode[templateServiceName]) == 0 {
delete(cls.mapTemplateServiceNode,templateServiceName)
}
}
mapNode := cls.mapServiceNode[serviceName]
delete(mapNode, nodeId)
if len(mapNode) == 0 {
@@ -173,7 +188,20 @@ func (cls *Cluster) serviceDiscoverySetNodeInfo(nodeInfo *NodeInfo) {
continue
}
mapDuplicate[serviceName] = nil
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
//if this is a template service, record the template relationship
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
//record the template
if _, ok = cls.mapTemplateServiceNode[templateServiceName]; ok == false {
cls.mapTemplateServiceNode[templateServiceName]=map[string]struct{}{}
}
cls.mapTemplateServiceNode[templateServiceName][serviceName] = struct{}{}
}
if _, ok = cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[string]struct{}, 1)
}
cls.mapServiceNode[serviceName][nodeInfo.NodeId] = struct{}{}
@@ -228,8 +256,6 @@ func (cls *Cluster) Init(localNodeId string, setupServiceFun SetupServiceFun) er
}
service.RegRpcEventFun = cls.RegRpcEvent
service.UnRegRpcEventFun = cls.UnRegRpcEvent
service.RegDiscoveryServiceEventFun = cls.RegDiscoveryEvent
service.UnRegDiscoveryServiceEventFun = cls.UnReDiscoveryEvent
err = cls.serviceDiscovery.InitDiscovery(localNodeId, cls.serviceDiscoveryDelNode, cls.serviceDiscoverySetNodeInfo)
if err != nil {
@@ -263,25 +289,29 @@ func (cls *Cluster) GetRpcClient(nodeId string) (*rpc.Client,bool) {
return cls.getRpcClient(nodeId)
}
func GetRpcClient(nodeId string, serviceMethod string,filterRetire bool, clientList []*rpc.Client) (error, int) {
func GetNodeIdByTemplateService(templateServiceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
return GetCluster().GetNodeIdByTemplateService(templateServiceName, rpcClientList, filterRetire)
}
func GetRpcClient(nodeId string, serviceMethod string,filterRetire bool, clientList []*rpc.Client) (error, []*rpc.Client) {
if nodeId != rpc.NodeIdNull {
pClient,retire := GetCluster().GetRpcClient(nodeId)
if pClient == nil {
return fmt.Errorf("cannot find nodeid %d!", nodeId), 0
return fmt.Errorf("cannot find nodeid %d!", nodeId), nil
}
//filter out retired nodes if required
if filterRetire == true && retire == true {
return fmt.Errorf("cannot find nodeid %d!", nodeId), 0
return fmt.Errorf("cannot find nodeid %d!", nodeId), nil
}
clientList[0] = pClient
return nil, 1
clientList = append(clientList,pClient)
return nil, clientList
}
findIndex := strings.Index(serviceMethod, ".")
if findIndex == -1 {
return fmt.Errorf("servicemethod param %s is error!", serviceMethod), 0
return fmt.Errorf("servicemethod param %s is error!", serviceMethod), nil
}
serviceName := serviceMethod[:findIndex]
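
Note on the API change above: GetRpcClient now appends to a caller-supplied slice and returns it, rather than filling a fixed-size array and returning a count. A minimal caller sketch; "node1" and "TestService.RPC_Echo" are placeholder names, and the import paths are assumed from the module path shown in this diff:

package main

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/cluster"
	"github.com/duanhf2012/origin/v2/rpc"
)

func main() {
	// "node1" and "TestService.RPC_Echo" are placeholders, not identifiers from this changeset.
	clientList := make([]*rpc.Client, 0, 1)
	err, clientList := cluster.GetRpcClient("node1", "TestService.RPC_Echo", true, clientList)
	if err != nil || len(clientList) == 0 {
		fmt.Println("no reachable, non-retired node provides the service:", err)
		return
	}
	// exactly one client is returned when a concrete node id is passed
	fmt.Println("target node:", clientList[0].GetTargetNodeId())
}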
@@ -322,23 +352,12 @@ func (cls *Cluster) NotifyAllService(event event.IEvent){
}
func (cls *Cluster) TriggerDiscoveryEvent(bDiscovery bool, nodeId string, serviceName []string) {
cls.rpcEventLocker.Lock()
defer cls.rpcEventLocker.Unlock()
for sName, _ := range cls.mapServiceListenDiscoveryEvent {
ser := service.GetService(sName)
if ser == nil {
log.Error("cannot find service",log.Any("services",serviceName))
continue
}
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
ser.(service.IModule).NotifyEvent(&eventData)
}
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
cls.NotifyAllService(&eventData)
}
func (cls *Cluster) GetLocalNodeInfo() *NodeInfo {
@@ -361,25 +380,6 @@ func (cls *Cluster) UnRegRpcEvent(serviceName string) {
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) RegDiscoveryEvent(serviceName string) {
cls.rpcEventLocker.Lock()
if cls.mapServiceListenDiscoveryEvent == nil {
cls.mapServiceListenDiscoveryEvent = map[string]struct{}{}
}
cls.mapServiceListenDiscoveryEvent[serviceName] = struct{}{}
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) UnReDiscoveryEvent(serviceName string) {
cls.rpcEventLocker.Lock()
delete(cls.mapServiceListenDiscoveryEvent, serviceName)
cls.rpcEventLocker.Unlock()
}
func HasService(nodeId string, serviceName string) bool {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
@@ -410,10 +410,50 @@ func GetNodeByServiceName(serviceName string) map[string]struct{} {
return mapNodeId
}
// GetNodeByTemplateServiceName resolves a template service name to its real service names, returning map[serviceName (real service name)]NodeId
func GetNodeByTemplateServiceName(templateServiceName string) map[string]string {
cluster.locker.RLock()
defer cluster.locker.RUnlock()
mapServiceName := cluster.mapTemplateServiceNode[templateServiceName]
mapNodeId := make(map[string]string,9)
for serviceName := range mapServiceName {
mapNode, ok := cluster.mapServiceNode[serviceName]
if ok == false {
return nil
}
for nodeId,_ := range mapNode {
mapNodeId[serviceName] = nodeId
}
}
return mapNodeId
}
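
GetNodeByTemplateServiceName maps a template service name to the concrete services instantiated from it. A short reading sketch; "ChatService" is a placeholder template name and the import path is assumed from the module path:

package example

import (
	"fmt"

	"github.com/duanhf2012/origin/v2/cluster"
)

// listTemplateInstances is a hypothetical helper; "ChatService" is a placeholder
// template service name, not one defined in this changeset.
func listTemplateInstances() {
	// returns map[real service name]node id
	for serviceName, nodeId := range cluster.GetNodeByTemplateServiceName("ChatService") {
		fmt.Printf("template instance %s runs on node %s\n", serviceName, nodeId)
	}
}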
func (cls *Cluster) GetGlobalCfg() interface{} {
return cls.globalCfg
}
func (cls *Cluster) ParseGlobalCfg(cfg interface{}) error{
if cls.globalCfg == nil {
return errors.New("no service configuration found")
}
rv := reflect.ValueOf(cls.globalCfg)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
return errors.New("no service configuration found")
}
bytes,err := json.Marshal(cls.globalCfg)
if err != nil {
return err
}
return json.Unmarshal(bytes,cfg)
}
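
ParseGlobalCfg round-trips the stored global configuration through JSON into a caller-defined struct. A minimal sketch, assuming a hypothetical GlobalCfg shape (the fields are placeholders, not part of this changeset):

package example

import "github.com/duanhf2012/origin/v2/cluster"

// GlobalCfg is a hypothetical shape for the cluster's Global config section;
// the field names are placeholders, not defined in this changeset.
type GlobalCfg struct {
	Area     string `json:"Area"`
	OpenTime int64  `json:"OpenTime"`
}

func loadGlobalCfg() (*GlobalCfg, error) {
	var cfg GlobalCfg
	// ParseGlobalCfg re-marshals the stored global config to JSON and unmarshals it into cfg
	if err := cluster.GetCluster().ParseGlobalCfg(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}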
func (cls *Cluster) GetNodeInfo(nodeId string) (NodeInfo,bool) {
cls.locker.RLock()
defer cls.locker.RUnlock()

View File

@@ -183,6 +183,10 @@ func (ds *OriginDiscoveryMaster) OnNodeDisconnect(nodeId string) {
func (ds *OriginDiscoveryMaster) RpcCastGo(serviceMethod string, args interface{}) {
for nodeId, _ := range ds.mapNodeInfo {
if nodeId == cluster.GetLocalNodeInfo().NodeId {
continue
}
ds.GoNode(nodeId, serviceMethod, args)
}
}

View File

@@ -270,7 +270,7 @@ func (cls *Cluster) readLocalClusterConfig(nodeId string) (DiscoveryInfo, []Node
}
if nodeId != rpc.NodeIdNull && (len(nodeInfoList) != 1) {
return discoveryInfo, nil,rpcMode, fmt.Errorf("%d configurations were found for the configuration with node ID %d!", len(nodeInfoList), nodeId)
return discoveryInfo, nil,rpcMode, fmt.Errorf("nodeid %s configuration error in NodeList", nodeId)
}
for i, _ := range nodeInfoList {
@@ -325,6 +325,10 @@ func (cls *Cluster) readLocalService(localNodeId string) error {
//save the common configuration
for _, s := range cls.localNodeInfo.ServiceList {
for {
splitServiceName := strings.Split(s,":")
if len(splitServiceName) == 2 {
s = splitServiceName[0]
}
//fetch the common service configuration
pubCfg, ok := serviceConfig[s]
if ok == true {
@@ -355,6 +359,11 @@ func (cls *Cluster) readLocalService(localNodeId string) error {
//combine all configurations
for _, s := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(s,":")
if len(splitServiceName) == 2 {
s = splitServiceName[0]
}
//look in NodeService first
var serviceCfg interface{}
var ok bool
@@ -382,12 +391,24 @@ func (cls *Cluster) parseLocalCfg() {
cls.mapRpc[cls.localNodeInfo.NodeId] = &rpcInfo
for _, sName := range cls.localNodeInfo.ServiceList {
if _, ok := cls.mapServiceNode[sName]; ok == false {
cls.mapServiceNode[sName] = make(map[string]struct{})
for _, serviceName := range cls.localNodeInfo.ServiceList {
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
//record the template
if _, ok := cls.mapTemplateServiceNode[templateServiceName]; ok == false {
cls.mapTemplateServiceNode[templateServiceName]=map[string]struct{}{}
}
cls.mapTemplateServiceNode[templateServiceName][serviceName] = struct{}{}
}
cls.mapServiceNode[sName][cls.localNodeInfo.NodeId] = struct{}{}
if _, ok := cls.mapServiceNode[serviceName]; ok == false {
cls.mapServiceNode[serviceName] = make(map[string]struct{})
}
cls.mapServiceNode[serviceName][cls.localNodeInfo.NodeId] = struct{}{}
}
}
@@ -403,6 +424,7 @@ func (cls *Cluster) InitCfg(localNodeId string) error {
cls.localServiceCfg = map[string]interface{}{}
cls.mapRpc = map[string]*NodeRpcInfo{}
cls.mapServiceNode = map[string]map[string]struct{}{}
cls.mapTemplateServiceNode = map[string]map[string]struct{}{}
//load the NodeList configuration of the local node
discoveryInfo, nodeInfoList,rpcMode, err := cls.readLocalClusterConfig(localNodeId)
@@ -436,12 +458,37 @@ func (cls *Cluster) IsConfigService(serviceName string) bool {
return ok
}
func (cls *Cluster) GetNodeIdByTemplateService(templateServiceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
cls.locker.RLock()
defer cls.locker.RUnlock()
func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, int) {
mapServiceName := cls.mapTemplateServiceNode[templateServiceName]
for serviceName := range mapServiceName {
mapNodeId, ok := cls.mapServiceNode[serviceName]
if ok == true {
for nodeId, _ := range mapNodeId {
pClient,retire := GetCluster().getRpcClient(nodeId)
if pClient == nil || pClient.IsConnected() == false {
continue
}
//skip nodes in retire state if retired nodes should be filtered out
if filterRetire == true && retire == true {
continue
}
rpcClientList = append(rpcClientList,pClient)
}
}
}
return nil, rpcClientList
}
func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.Client, filterRetire bool) (error, []*rpc.Client) {
cls.locker.RLock()
defer cls.locker.RUnlock()
mapNodeId, ok := cls.mapServiceNode[serviceName]
count := 0
if ok == true {
for nodeId, _ := range mapNodeId {
pClient,retire := GetCluster().getRpcClient(nodeId)
@@ -454,15 +501,11 @@ func (cls *Cluster) GetNodeIdByService(serviceName string, rpcClientList []*rpc.
continue
}
rpcClientList[count] = pClient
count++
if count >= cap(rpcClientList) {
break
}
rpcClientList = append(rpcClientList,pClient)
}
}
return nil, count
return nil, rpcClientList
}
func (cls *Cluster) GetServiceCfg(serviceName string) interface{} {

View File

@@ -10,6 +10,7 @@ import (
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/util/queue"
"context"
)
var idleTimeout = int64(2 * time.Second)
@@ -30,6 +31,9 @@ type dispatch struct {
waitWorker sync.WaitGroup
waitDispatch sync.WaitGroup
cancelContext context.Context
cancel context.CancelFunc
}
func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan task, cbChannel chan func(error)) {
@@ -40,7 +44,7 @@ func (d *dispatch) open(minGoroutineNum int32, maxGoroutineNum int32, tasks chan
d.workerQueue = make(chan task)
d.cbChannel = cbChannel
d.queueIdChannel = make(chan int64, cap(tasks))
d.cancelContext,d.cancel = context.WithCancel(context.Background())
d.waitDispatch.Add(1)
go d.run()
}
@@ -64,10 +68,12 @@ func (d *dispatch) run() {
d.processqueueEvent(queueId)
case <-timeout.C:
d.processTimer()
if atomic.LoadInt32(&d.minConcurrentNum) == -1 && len(d.tasks) == 0 {
atomic.StoreInt64(&idleTimeout,int64(time.Millisecond * 10))
}
case <- d.cancelContext.Done():
atomic.StoreInt64(&idleTimeout,int64(time.Millisecond * 5))
timeout.Reset(time.Duration(atomic.LoadInt64(&idleTimeout)))
for i:=int32(0);i<d.workerNum;i++{
d.processIdle()
}
}
}
@@ -166,6 +172,8 @@ func (c *dispatch) pushAsyncDoCallbackEvent(cb func(err error)) {
func (d *dispatch) close() {
atomic.StoreInt32(&d.minConcurrentNum, -1)
d.cancel()
breakFor:
for {

View File

@@ -236,7 +236,6 @@ func (processor *EventProcessor) castEvent(event IEvent){
eventProcessor,ok := processor.mapListenerEvent[event.GetEventType()]
if ok == false || processor == nil{
log.Debug("event is not listen",log.Int("event type",int(event.GetEventType())))
return
}

View File

@@ -17,6 +17,7 @@ const (
Sys_Event_QueueTaskFinish EventType = -10
Sys_Event_Retire EventType = -11
Sys_Event_EtcdDiscovery EventType = -12
Sys_Event_Gin_Event EventType = -13
Sys_Event_User_Define EventType = 1
)

View File

@@ -10,16 +10,20 @@ import (
"sync"
)
const defaultSkip = 7
type IOriginHandler interface {
slog.Handler
Lock()
UnLock()
SetSkip(skip int)
GetSkip() int
}
type BaseHandler struct {
addSource bool
w io.Writer
locker sync.Mutex
skip int
}
type OriginTextHandler struct {
@@ -32,6 +36,14 @@ type OriginJsonHandler struct {
*slog.JSONHandler
}
func (bh *BaseHandler) SetSkip(skip int){
bh.skip = skip
}
func (bh *BaseHandler) GetSkip() int{
return bh.skip
}
func getStrLevel(level slog.Level) string{
switch level {
case LevelTrace:
@@ -78,6 +90,7 @@ func NewOriginTextHandler(level slog.Level,w io.Writer,addSource bool,replaceAtt
ReplaceAttr: replaceAttr,
})
textHandler.skip = defaultSkip
return &textHandler
}
@@ -124,6 +137,7 @@ func NewOriginJsonHandler(level slog.Level,w io.Writer,addSource bool,replaceAtt
ReplaceAttr: replaceAttr,
})
jsonHandler.skip = defaultSkip
return &jsonHandler
}
@@ -141,7 +155,7 @@ func (oh *OriginJsonHandler) Handle(context context.Context, record slog.Record)
func (b *BaseHandler) Fill(context context.Context, record *slog.Record) {
if b.addSource {
var pcs [1]uintptr
runtime.Callers(7, pcs[:])
runtime.Callers(b.skip, pcs[:])
record.PC = pcs[0]
}
}

View File

@@ -239,6 +239,10 @@ func (iw *IoWriter) swichFile() error{
return nil
}
func GetDefaultHandler() IOriginHandler{
return gLogger.(*Logger).Slogger.Handler().(IOriginHandler)
}
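
GetDefaultHandler exposes the handler behind the default logger, so the new SetSkip/GetSkip accessors can adjust the runtime.Callers depth used by Fill to resolve the source location. A minimal sketch; the +1 is only an illustrative value for a wrapper that adds one extra call frame above the logger:

package example

import "github.com/duanhf2012/origin/v2/log"

// adjustLogSkip illustrates the new SetSkip/GetSkip accessors.
func adjustLogSkip() {
	h := log.GetDefaultHandler()
	// defaultSkip is 7; a wrapper one level deeper needs one more frame skipped
	h.SetSkip(h.GetSkip() + 1)
}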
func NewTextLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (ILogger,error){
var logger Logger
logger.ioWriter.filePath = pathName

View File

@@ -45,8 +45,10 @@ func (jsonProcessor *JsonProcessor) SetByteOrder(littleEndian bool) {
}
// must goroutine safe
func (jsonProcessor *JsonProcessor ) MsgRoute(clientId string,msg interface{}) error{
func (jsonProcessor *JsonProcessor ) MsgRoute(clientId string,msg interface{},recyclerReaderBytes func(data []byte)) error{
pPackInfo := msg.(*JsonPackInfo)
defer recyclerReaderBytes(pPackInfo.rawMsg)
v,ok := jsonProcessor.mapMsg[pPackInfo.typ]
if ok == false {
return fmt.Errorf("cannot find msgtype %d is register!",pPackInfo.typ)
@@ -58,7 +60,6 @@ func (jsonProcessor *JsonProcessor ) MsgRoute(clientId string,msg interface{}) e
func (jsonProcessor *JsonProcessor) Unmarshal(clientId string,data []byte) (interface{}, error) {
typeStruct := struct {Type int `json:"typ"`}{}
defer jsonProcessor.ReleaseBytes(data)
err := json.Unmarshal(data, &typeStruct)
if err != nil {
return nil, err
@@ -76,7 +77,7 @@ func (jsonProcessor *JsonProcessor) Unmarshal(clientId string,data []byte) (inte
return nil,err
}
return &JsonPackInfo{typ:msgType,msg:msgData},nil
return &JsonPackInfo{typ:msgType,msg:msgData,rawMsg: data},nil
}
func (jsonProcessor *JsonProcessor) Marshal(clientId string,msg interface{}) ([]byte, error) {
@@ -104,7 +105,8 @@ func (jsonProcessor *JsonProcessor) MakeRawMsg(msgType uint16,msg []byte) *JsonP
return &JsonPackInfo{typ:msgType,rawMsg:msg}
}
func (jsonProcessor *JsonProcessor) UnknownMsgRoute(clientId string,msg interface{}){
func (jsonProcessor *JsonProcessor) UnknownMsgRoute(clientId string,msg interface{},recyclerReaderBytes func(data []byte)){
defer recyclerReaderBytes(msg.([]byte))
if jsonProcessor.unknownMessageHandler==nil {
log.Debug("Unknown message",log.String("clientId",clientId))
return

View File

@@ -54,8 +54,10 @@ func (slf *PBPackInfo) GetMsg() proto.Message {
}
// must goroutine safe
func (pbProcessor *PBProcessor) MsgRoute(clientId string, msg interface{}) error {
func (pbProcessor *PBProcessor) MsgRoute(clientId string, msg interface{},recyclerReaderBytes func(data []byte)) error {
pPackInfo := msg.(*PBPackInfo)
defer recyclerReaderBytes(pPackInfo.rawMsg)
v, ok := pbProcessor.mapMsg[pPackInfo.typ]
if ok == false {
return fmt.Errorf("Cannot find msgtype %d is register!", pPackInfo.typ)
@@ -67,7 +69,6 @@ func (pbProcessor *PBProcessor) MsgRoute(clientId string, msg interface{}) error
// must goroutine safe
func (pbProcessor *PBProcessor) Unmarshal(clientId string, data []byte) (interface{}, error) {
defer pbProcessor.ReleaseBytes(data)
return pbProcessor.UnmarshalWithOutRelease(clientId, data)
}
@@ -91,7 +92,7 @@ func (pbProcessor *PBProcessor) UnmarshalWithOutRelease(clientId string, data []
return nil, err
}
return &PBPackInfo{typ: msgType, msg: protoMsg}, nil
return &PBPackInfo{typ: msgType, msg: protoMsg,rawMsg:data}, nil
}
// must goroutine safe
@@ -133,8 +134,9 @@ func (pbProcessor *PBProcessor) MakeRawMsg(msgType uint16, msg []byte) *PBPackIn
return &PBPackInfo{typ: msgType, rawMsg: msg}
}
func (pbProcessor *PBProcessor) UnknownMsgRoute(clientId string, msg interface{}) {
func (pbProcessor *PBProcessor) UnknownMsgRoute(clientId string, msg interface{},recyclerReaderBytes func(data []byte)) {
pbProcessor.unknownMessageHandler(clientId, msg.([]byte))
recyclerReaderBytes(msg.([]byte))
}
// connect event

View File

@@ -38,9 +38,11 @@ func (pbRawProcessor *PBRawProcessor) SetByteOrder(littleEndian bool) {
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId string, msg interface{}) error{
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId string, msg interface{},recyclerReaderBytes func(data []byte)) error{
pPackInfo := msg.(*PBRawPackInfo)
pbRawProcessor.msgHandler(clientId,pPackInfo.typ,pPackInfo.rawMsg)
recyclerReaderBytes(pPackInfo.rawMsg)
return nil
}
@@ -80,7 +82,8 @@ func (pbRawProcessor *PBRawProcessor) MakeRawMsg(msgType uint16,msg []byte,pbRaw
pbRawPackInfo.rawMsg = msg
}
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId string,msg interface{}){
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId string,msg interface{},recyclerReaderBytes func(data []byte)){
defer recyclerReaderBytes(msg.([]byte))
if pbRawProcessor.unknownMessageHandler == nil {
return
}

View File

@@ -3,9 +3,9 @@ package processor
type IProcessor interface {
// must goroutine safe
MsgRoute(clientId string,msg interface{}) error
MsgRoute(clientId string,msg interface{},recyclerReaderBytes func(data []byte)) error
//must goroutine safe
UnknownMsgRoute(clientId string,msg interface{})
UnknownMsgRoute(clientId string,msg interface{},recyclerReaderBytes func(data []byte))
// connect event
ConnectedRoute(clientId string)
DisConnectedRoute(clientId string)

View File

@@ -129,6 +129,13 @@ func (tcpConn *TCPConn) ReadMsg() ([]byte, error) {
return tcpConn.msgParser.Read(tcpConn)
}
func (tcpConn *TCPConn) GetRecyclerReaderBytes() func (data []byte) {
bytePool := tcpConn.msgParser.IBytesMempool
return func(data []byte) {
bytePool.ReleaseBytes(data)
}
}
func (tcpConn *TCPConn) ReleaseReadMsg(byteBuff []byte){
tcpConn.msgParser.ReleaseBytes(byteBuff)
}
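
With the recycler parameter added to the processors and GetRecyclerReaderBytes added to TCPConn, the raw read buffer is now returned to the connection's byte pool after routing instead of inside Unmarshal. A hedged sketch of how a read loop might thread the two together; the surrounding service code is not part of this changeset and the import paths are assumed:

package example

import (
	"github.com/duanhf2012/origin/v2/network"
	"github.com/duanhf2012/origin/v2/network/processor"
)

// routeOne sketches a hypothetical per-connection read step; it is not code from this changeset.
func routeOne(tcpConn *network.TCPConn, proc *processor.JsonProcessor, clientId string) {
	data, err := tcpConn.ReadMsg()
	if err != nil {
		return
	}
	pack, err := proc.Unmarshal(clientId, data)
	if err != nil {
		// Unmarshal no longer releases the read buffer itself (see the JsonProcessor
		// diff above), so hand it back to the pool explicitly on error
		tcpConn.ReleaseReadMsg(data)
		return
	}
	// MsgRoute now receives the recycler and releases the raw bytes once routing is done
	_ = proc.MsgRoute(clientId, pack, tcpConn.GetRecyclerReaderBytes())
}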

View File

@@ -25,9 +25,11 @@ import (
var sig chan os.Signal
var nodeId string
var preSetupService []service.IService //pre-registered services
var preSetupTemplateService []func()service.IService
var profilerInterval time.Duration
var bValid bool
var configDir = "./config/"
var NodeIsRun = false
const(
SingleStop syscall.Signal = 10
@@ -56,7 +58,7 @@ func init() {
console.RegisterCommandString("loglevel", "debug", "<-loglevel debug|release|warning|error|fatal> Set loglevel.", setLevel)
console.RegisterCommandString("logpath", "", "<-logpath path> Set log file path.", setLogPath)
console.RegisterCommandInt("logsize", 0, "<-logsize size> Set log size(MB).", setLogSize)
console.RegisterCommandInt("logchannelcap", 0, "<-logchannelcap num> Set log channel cap.", setLogChannelCapNum)
console.RegisterCommandInt("logchannelcap", -1, "<-logchannelcap num> Set log channel cap.", setLogChannelCapNum)
console.RegisterCommandString("pprof", "", "<-pprof ip:port> Open performance analysis.", setPprof)
}
@@ -117,7 +119,7 @@ func setConfigPath(val interface{}) error {
}
func getRunProcessPid(nodeId string) (int, error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
f, err := os.OpenFile(fmt.Sprintf("%s_%s.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
defer f.Close()
if err != nil {
return 0, err
@@ -169,6 +171,31 @@ func initNode(id string) {
serviceOrder := cluster.GetCluster().GetLocalNodeInfo().ServiceList
for _,serviceName:= range serviceOrder{
bSetup := false
//check whether a template service is configured
splitServiceName := strings.Split(serviceName,":")
if len(splitServiceName) == 2 {
serviceName = splitServiceName[0]
templateServiceName := splitServiceName[1]
for _,newSer := range preSetupTemplateService {
ser := newSer()
ser.OnSetup(ser)
if ser.GetName() == templateServiceName {
ser.SetName(serviceName)
ser.Init(ser,cluster.GetRpcClient,cluster.GetRpcServer,cluster.GetCluster().GetServiceCfg(ser.GetName()))
service.Setup(ser)
bSetup = true
break
}
}
if bSetup == false{
log.Error("Template service not found",log.String("service name",serviceName),log.String("template service name",templateServiceName))
os.Exit(1)
}
}
for _, s := range preSetupService {
if s.GetName() != serviceName {
continue
@@ -328,13 +355,14 @@ func startNode(args interface{}) error {
cluster.GetCluster().Start()
//6. Listen for process exit signals & performance reports
bRun := true
var pProfilerTicker *time.Ticker = &time.Ticker{}
if profilerInterval > 0 {
pProfilerTicker = time.NewTicker(profilerInterval)
}
for bRun {
NodeIsRun = true
for NodeIsRun {
select {
case s := <-sig:
signal := s.(syscall.Signal)
@@ -342,7 +370,7 @@ func startNode(args interface{}) error {
log.Info("receipt retire signal.")
notifyAllServiceRetire()
}else {
bRun = false
NodeIsRun = false
log.Info("receipt stop signal.")
}
case <-pProfilerTicker.C:
@@ -367,6 +395,12 @@ func Setup(s ...service.IService) {
}
}
func SetupTemplate(fs ...func()service.IService){
for _, f := range fs {
preSetupTemplateService = append(preSetupTemplateService, f)
}
}
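
SetupTemplate registers a factory rather than a service instance; initNode then instantiates the matching template for every ServiceList entry written as "RealName:TemplateName" and renames it to the real service name. A minimal sketch, assuming the usual node.Start() entry point and a placeholder RoomService template:

package main

import (
	"github.com/duanhf2012/origin/v2/node"
	"github.com/duanhf2012/origin/v2/service"
)

// RoomService is a hypothetical template service; the name is a placeholder.
type RoomService struct {
	service.Service
}

func (rs *RoomService) OnInit() error { return nil }

func main() {
	// Register a factory instead of an instance. For a ServiceList entry such as
	// "RoomService1:RoomService", initNode creates a fresh instance whose default
	// name matches the template name and renames it to the real name ("RoomService1").
	node.SetupTemplate(func() service.IService { return &RoomService{} })
	node.Start()
}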
func GetService(serviceName string) service.IService {
return service.GetService(serviceName)
}
@@ -472,6 +506,10 @@ func setLogChannelCapNum(args interface{}) error {
return errors.New("param logsize is error")
}
if logChannelCap == -1 {
return nil
}
log.LogChannelCap = logChannelCap
return nil
}

View File

@@ -1,8 +1,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v3.11.4
// source: test/rpc/messagequeue.proto
// protoc v4.24.0
// source: rpcproto/messagequeue.proto
package rpc
@@ -50,11 +50,11 @@ func (x SubscribeType) String() string {
}
func (SubscribeType) Descriptor() protoreflect.EnumDescriptor {
return file_test_rpc_messagequeue_proto_enumTypes[0].Descriptor()
return file_rpcproto_messagequeue_proto_enumTypes[0].Descriptor()
}
func (SubscribeType) Type() protoreflect.EnumType {
return &file_test_rpc_messagequeue_proto_enumTypes[0]
return &file_rpcproto_messagequeue_proto_enumTypes[0]
}
func (x SubscribeType) Number() protoreflect.EnumNumber {
@@ -63,7 +63,7 @@ func (x SubscribeType) Number() protoreflect.EnumNumber {
// Deprecated: Use SubscribeType.Descriptor instead.
func (SubscribeType) EnumDescriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{0}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{0}
}
type SubscribeMethod int32
@@ -96,11 +96,11 @@ func (x SubscribeMethod) String() string {
}
func (SubscribeMethod) Descriptor() protoreflect.EnumDescriptor {
return file_test_rpc_messagequeue_proto_enumTypes[1].Descriptor()
return file_rpcproto_messagequeue_proto_enumTypes[1].Descriptor()
}
func (SubscribeMethod) Type() protoreflect.EnumType {
return &file_test_rpc_messagequeue_proto_enumTypes[1]
return &file_rpcproto_messagequeue_proto_enumTypes[1]
}
func (x SubscribeMethod) Number() protoreflect.EnumNumber {
@@ -109,7 +109,7 @@ func (x SubscribeMethod) Number() protoreflect.EnumNumber {
// Deprecated: Use SubscribeMethod.Descriptor instead.
func (SubscribeMethod) EnumDescriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{1}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{1}
}
type DBQueuePopReq struct {
@@ -127,7 +127,7 @@ type DBQueuePopReq struct {
func (x *DBQueuePopReq) Reset() {
*x = DBQueuePopReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[0]
mi := &file_rpcproto_messagequeue_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -140,7 +140,7 @@ func (x *DBQueuePopReq) String() string {
func (*DBQueuePopReq) ProtoMessage() {}
func (x *DBQueuePopReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[0]
mi := &file_rpcproto_messagequeue_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -153,7 +153,7 @@ func (x *DBQueuePopReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePopReq.ProtoReflect.Descriptor instead.
func (*DBQueuePopReq) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{0}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{0}
}
func (x *DBQueuePopReq) GetCustomerId() string {
@@ -203,7 +203,7 @@ type DBQueuePopRes struct {
func (x *DBQueuePopRes) Reset() {
*x = DBQueuePopRes{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[1]
mi := &file_rpcproto_messagequeue_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -216,7 +216,7 @@ func (x *DBQueuePopRes) String() string {
func (*DBQueuePopRes) ProtoMessage() {}
func (x *DBQueuePopRes) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[1]
mi := &file_rpcproto_messagequeue_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -229,7 +229,7 @@ func (x *DBQueuePopRes) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePopRes.ProtoReflect.Descriptor instead.
func (*DBQueuePopRes) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{1}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{1}
}
func (x *DBQueuePopRes) GetQueueName() string {
@@ -255,7 +255,7 @@ type DBQueueSubscribeReq struct {
SubType SubscribeType `protobuf:"varint,1,opt,name=SubType,proto3,enum=SubscribeType" json:"SubType,omitempty"` //subscription type
Method SubscribeMethod `protobuf:"varint,2,opt,name=Method,proto3,enum=SubscribeMethod" json:"Method,omitempty"` //subscription method
CustomerId string `protobuf:"bytes,3,opt,name=CustomerId,proto3" json:"CustomerId,omitempty"` //consumer Id
FromNodeId int32 `protobuf:"varint,4,opt,name=FromNodeId,proto3" json:"FromNodeId,omitempty"`
FromNodeId string `protobuf:"bytes,4,opt,name=FromNodeId,proto3" json:"FromNodeId,omitempty"`
RpcMethod string `protobuf:"bytes,5,opt,name=RpcMethod,proto3" json:"RpcMethod,omitempty"`
TopicName string `protobuf:"bytes,6,opt,name=TopicName,proto3" json:"TopicName,omitempty"` //topic name
StartIndex uint64 `protobuf:"varint,7,opt,name=StartIndex,proto3" json:"StartIndex,omitempty"` //start position; format: the leading part is the timestamp in seconds followed by the sequence number. If 0 is passed, the service automatically sets it to (4bit current time in seconds) | (0000 4bit)
@@ -265,7 +265,7 @@ type DBQueueSubscribeReq struct {
func (x *DBQueueSubscribeReq) Reset() {
*x = DBQueueSubscribeReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[2]
mi := &file_rpcproto_messagequeue_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -278,7 +278,7 @@ func (x *DBQueueSubscribeReq) String() string {
func (*DBQueueSubscribeReq) ProtoMessage() {}
func (x *DBQueueSubscribeReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[2]
mi := &file_rpcproto_messagequeue_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -291,7 +291,7 @@ func (x *DBQueueSubscribeReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueueSubscribeReq.ProtoReflect.Descriptor instead.
func (*DBQueueSubscribeReq) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{2}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{2}
}
func (x *DBQueueSubscribeReq) GetSubType() SubscribeType {
@@ -315,11 +315,11 @@ func (x *DBQueueSubscribeReq) GetCustomerId() string {
return ""
}
func (x *DBQueueSubscribeReq) GetFromNodeId() int32 {
func (x *DBQueueSubscribeReq) GetFromNodeId() string {
if x != nil {
return x.FromNodeId
}
return 0
return ""
}
func (x *DBQueueSubscribeReq) GetRpcMethod() string {
@@ -359,7 +359,7 @@ type DBQueueSubscribeRes struct {
func (x *DBQueueSubscribeRes) Reset() {
*x = DBQueueSubscribeRes{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[3]
mi := &file_rpcproto_messagequeue_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -372,7 +372,7 @@ func (x *DBQueueSubscribeRes) String() string {
func (*DBQueueSubscribeRes) ProtoMessage() {}
func (x *DBQueueSubscribeRes) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[3]
mi := &file_rpcproto_messagequeue_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -385,7 +385,7 @@ func (x *DBQueueSubscribeRes) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueueSubscribeRes.ProtoReflect.Descriptor instead.
func (*DBQueueSubscribeRes) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{3}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{3}
}
type DBQueuePublishReq struct {
@@ -400,7 +400,7 @@ type DBQueuePublishReq struct {
func (x *DBQueuePublishReq) Reset() {
*x = DBQueuePublishReq{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[4]
mi := &file_rpcproto_messagequeue_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -413,7 +413,7 @@ func (x *DBQueuePublishReq) String() string {
func (*DBQueuePublishReq) ProtoMessage() {}
func (x *DBQueuePublishReq) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[4]
mi := &file_rpcproto_messagequeue_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -426,7 +426,7 @@ func (x *DBQueuePublishReq) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePublishReq.ProtoReflect.Descriptor instead.
func (*DBQueuePublishReq) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{4}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{4}
}
func (x *DBQueuePublishReq) GetTopicName() string {
@@ -452,7 +452,7 @@ type DBQueuePublishRes struct {
func (x *DBQueuePublishRes) Reset() {
*x = DBQueuePublishRes{}
if protoimpl.UnsafeEnabled {
mi := &file_test_rpc_messagequeue_proto_msgTypes[5]
mi := &file_rpcproto_messagequeue_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -465,7 +465,7 @@ func (x *DBQueuePublishRes) String() string {
func (*DBQueuePublishRes) ProtoMessage() {}
func (x *DBQueuePublishRes) ProtoReflect() protoreflect.Message {
mi := &file_test_rpc_messagequeue_proto_msgTypes[5]
mi := &file_rpcproto_messagequeue_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -478,13 +478,13 @@ func (x *DBQueuePublishRes) ProtoReflect() protoreflect.Message {
// Deprecated: Use DBQueuePublishRes.ProtoReflect.Descriptor instead.
func (*DBQueuePublishRes) Descriptor() ([]byte, []int) {
return file_test_rpc_messagequeue_proto_rawDescGZIP(), []int{5}
return file_rpcproto_messagequeue_proto_rawDescGZIP(), []int{5}
}
var File_test_rpc_messagequeue_proto protoreflect.FileDescriptor
var File_rpcproto_messagequeue_proto protoreflect.FileDescriptor
var file_test_rpc_messagequeue_proto_rawDesc = []byte{
0x0a, 0x1b, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
var file_rpcproto_messagequeue_proto_rawDesc = []byte{
0x0a, 0x1b, 0x72, 0x70, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x01,
0x0a, 0x0d, 0x44, 0x42, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12,
0x1e, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x18, 0x01, 0x20,
@@ -510,7 +510,7 @@ var file_test_rpc_messagequeue_proto_rawDesc = []byte{
0x6f, 0x64, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x75,
0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x72,
0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x46, 0x72, 0x6f, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x70,
0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x52,
0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x6f, 0x70, 0x69,
@@ -539,20 +539,20 @@ var file_test_rpc_messagequeue_proto_rawDesc = []byte{
}
var (
file_test_rpc_messagequeue_proto_rawDescOnce sync.Once
file_test_rpc_messagequeue_proto_rawDescData = file_test_rpc_messagequeue_proto_rawDesc
file_rpcproto_messagequeue_proto_rawDescOnce sync.Once
file_rpcproto_messagequeue_proto_rawDescData = file_rpcproto_messagequeue_proto_rawDesc
)
func file_test_rpc_messagequeue_proto_rawDescGZIP() []byte {
file_test_rpc_messagequeue_proto_rawDescOnce.Do(func() {
file_test_rpc_messagequeue_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_rpc_messagequeue_proto_rawDescData)
func file_rpcproto_messagequeue_proto_rawDescGZIP() []byte {
file_rpcproto_messagequeue_proto_rawDescOnce.Do(func() {
file_rpcproto_messagequeue_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpcproto_messagequeue_proto_rawDescData)
})
return file_test_rpc_messagequeue_proto_rawDescData
return file_rpcproto_messagequeue_proto_rawDescData
}
var file_test_rpc_messagequeue_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_test_rpc_messagequeue_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_test_rpc_messagequeue_proto_goTypes = []interface{}{
var file_rpcproto_messagequeue_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_rpcproto_messagequeue_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_rpcproto_messagequeue_proto_goTypes = []interface{}{
(SubscribeType)(0), // 0: SubscribeType
(SubscribeMethod)(0), // 1: SubscribeMethod
(*DBQueuePopReq)(nil), // 2: DBQueuePopReq
@@ -562,7 +562,7 @@ var file_test_rpc_messagequeue_proto_goTypes = []interface{}{
(*DBQueuePublishReq)(nil), // 6: DBQueuePublishReq
(*DBQueuePublishRes)(nil), // 7: DBQueuePublishRes
}
var file_test_rpc_messagequeue_proto_depIdxs = []int32{
var file_rpcproto_messagequeue_proto_depIdxs = []int32{
0, // 0: DBQueueSubscribeReq.SubType:type_name -> SubscribeType
1, // 1: DBQueueSubscribeReq.Method:type_name -> SubscribeMethod
2, // [2:2] is the sub-list for method output_type
@@ -572,13 +572,13 @@ var file_test_rpc_messagequeue_proto_depIdxs = []int32{
0, // [0:2] is the sub-list for field type_name
}
func init() { file_test_rpc_messagequeue_proto_init() }
func file_test_rpc_messagequeue_proto_init() {
if File_test_rpc_messagequeue_proto != nil {
func init() { file_rpcproto_messagequeue_proto_init() }
func file_rpcproto_messagequeue_proto_init() {
if File_rpcproto_messagequeue_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_test_rpc_messagequeue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePopReq); i {
case 0:
return &v.state
@@ -590,7 +590,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePopRes); i {
case 0:
return &v.state
@@ -602,7 +602,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueueSubscribeReq); i {
case 0:
return &v.state
@@ -614,7 +614,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueueSubscribeRes); i {
case 0:
return &v.state
@@ -626,7 +626,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePublishReq); i {
case 0:
return &v.state
@@ -638,7 +638,7 @@ func file_test_rpc_messagequeue_proto_init() {
return nil
}
}
file_test_rpc_messagequeue_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
file_rpcproto_messagequeue_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DBQueuePublishRes); i {
case 0:
return &v.state
@@ -655,19 +655,19 @@ func file_test_rpc_messagequeue_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_test_rpc_messagequeue_proto_rawDesc,
RawDescriptor: file_rpcproto_messagequeue_proto_rawDesc,
NumEnums: 2,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_test_rpc_messagequeue_proto_goTypes,
DependencyIndexes: file_test_rpc_messagequeue_proto_depIdxs,
EnumInfos: file_test_rpc_messagequeue_proto_enumTypes,
MessageInfos: file_test_rpc_messagequeue_proto_msgTypes,
GoTypes: file_rpcproto_messagequeue_proto_goTypes,
DependencyIndexes: file_rpcproto_messagequeue_proto_depIdxs,
EnumInfos: file_rpcproto_messagequeue_proto_enumTypes,
MessageInfos: file_rpcproto_messagequeue_proto_msgTypes,
}.Build()
File_test_rpc_messagequeue_proto = out.File
file_test_rpc_messagequeue_proto_rawDesc = nil
file_test_rpc_messagequeue_proto_goTypes = nil
file_test_rpc_messagequeue_proto_depIdxs = nil
File_rpcproto_messagequeue_proto = out.File
file_rpcproto_messagequeue_proto_rawDesc = nil
file_rpcproto_messagequeue_proto_goTypes = nil
file_rpcproto_messagequeue_proto_depIdxs = nil
}

View File

@@ -31,7 +31,7 @@ message DBQueueSubscribeReq {
SubscribeType SubType = 1; //subscription type
SubscribeMethod Method = 2; //subscription method
string CustomerId = 3; //consumer Id
int32 FromNodeId = 4;
string FromNodeId = 4;
string RpcMethod = 5;
string TopicName = 6; //topic name
uint64 StartIndex = 7; //start position; format: the leading part is the timestamp in seconds followed by the sequence number. If 0 is passed, the service automatically sets it to (4bit current time in seconds) | (0000 4bit)

View File

@@ -13,9 +13,9 @@ import (
"time"
)
const maxClusterNode int = 128
const maxClusterNode int = 32
type FuncRpcClient func(nodeId string, serviceMethod string,filterRetire bool, client []*Client) (error, int)
type FuncRpcClient func(nodeId string, serviceMethod string,filterRetire bool, client []*Client) (error, []*Client)
type FuncRpcServer func() IServer
const NodeIdNull = ""
@@ -63,7 +63,7 @@ type RpcHandler struct {
funcRpcClient FuncRpcClient
funcRpcServer FuncRpcServer
pClientList []*Client
//pClientList []*Client
}
//type TriggerRpcConnEvent func(bConnect bool, clientSeq uint32, nodeId string)
@@ -110,7 +110,6 @@ type IRpcHandler interface {
GoNode(nodeId string, serviceMethod string, args interface{}) error
RawGoNode(rpcProcessorType RpcProcessorType, nodeId string, rpcMethodId uint32, serviceName string, rawArgs []byte) error
CastGo(serviceMethod string, args interface{}) error
IsSingleCoroutine() bool
UnmarshalInParam(rpcProcessor IRpcProcessor, serviceMethod string, rawRpcMethodId uint32, inParam []byte) (interface{}, error)
GetRpcServer() FuncRpcServer
}
@@ -135,7 +134,6 @@ func (handler *RpcHandler) InitRpcHandler(rpcHandler IRpcHandler, getClientFun F
handler.mapFunctions = map[string]RpcMethodInfo{}
handler.funcRpcClient = getClientFun
handler.funcRpcServer = getServerFun
handler.pClientList = make([]*Client, maxClusterNode)
handler.RegisterRpc(rpcHandler)
}
@@ -274,7 +272,7 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
//ordinary rpc request
v, ok := handler.mapFunctions[request.RpcRequestData.GetServiceMethod()]
if ok == false {
err := "RpcHandler " + handler.rpcHandler.GetName() + "cannot find " + request.RpcRequestData.GetServiceMethod()
err := "RpcHandler " + handler.rpcHandler.GetName() + " cannot find " + request.RpcRequestData.GetServiceMethod()
log.Error("HandlerRpcRequest cannot find serviceMethod",log.String("RpcHandlerName",handler.rpcHandler.GetName()),log.String("serviceMethod",request.RpcRequestData.GetServiceMethod()))
if request.requestHandle != nil {
request.requestHandle(nil, RpcError(err))
@@ -435,9 +433,9 @@ func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param
}
func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId string, serviceMethod string, args interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList[:])
if count == 0 {
pClientList :=make([]*Client,0,maxClusterNode)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList)
if len(pClientList) == 0 {
if err != nil {
log.Error("call serviceMethod is failed",log.String("serviceMethod",serviceMethod),log.ErrorAttr("error",err))
} else {
@@ -446,13 +444,13 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId str
return err
}
if count > 1 && bCast == false {
if len(pClientList) > 1 && bCast == false {
log.Error("cannot call serviceMethod more then 1 node",log.String("serviceMethod",serviceMethod))
return errors.New("cannot call more then 1 node")
}
//2. invoke via rpcClient
for i := 0; i < count; i++ {
for i := 0; i < len(pClientList); i++ {
pCall := pClientList[i].Go(pClientList[i].GetTargetNodeId(),DefaultRpcTimeout,handler.rpcHandler,true, serviceMethod, args, nil)
if pCall.Err != nil {
err = pCall.Err
@@ -465,16 +463,16 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId str
}
func (handler *RpcHandler) callRpc(timeout time.Duration,nodeId string, serviceMethod string, args interface{}, reply interface{}) error {
var pClientList [maxClusterNode]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList[:])
pClientList :=make([]*Client,0,maxClusterNode)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList)
if err != nil {
log.Error("Call serviceMethod is failed",log.ErrorAttr("error",err))
return err
} else if count <= 0 {
} else if len(pClientList) <= 0 {
err = errors.New("Call serviceMethod is error:cannot find " + serviceMethod)
log.Error("cannot find serviceMethod",log.String("serviceMethod",serviceMethod))
return err
} else if count > 1 {
} else if len(pClientList) > 1 {
log.Error("Cannot call more then 1 node!",log.String("serviceMethod",serviceMethod))
return errors.New("cannot call more then 1 node")
}
@@ -509,9 +507,9 @@ func (handler *RpcHandler) asyncCallRpc(timeout time.Duration,nodeId string, ser
}
reply := reflect.New(fVal.Type().In(0).Elem()).Interface()
var pClientList [2]*Client
err, count := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList[:])
if count == 0 || err != nil {
pClientList :=make([]*Client,0,1)
err, pClientList := handler.funcRpcClient(nodeId, serviceMethod,false, pClientList[:])
if len(pClientList) == 0 || err != nil {
if err == nil {
if nodeId != NodeIdNull {
err = fmt.Errorf("cannot find %s from nodeId %d",serviceMethod,nodeId)
@@ -524,7 +522,7 @@ func (handler *RpcHandler) asyncCallRpc(timeout time.Duration,nodeId string, ser
return emptyCancelRpc,nil
}
if count > 1 {
if len(pClientList) > 1 {
err := errors.New("cannot call more then 1 node")
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
log.Error("cannot call more then 1 node",log.String("serviceMethod",serviceMethod))
@@ -540,10 +538,6 @@ func (handler *RpcHandler) GetName() string {
return handler.rpcHandler.GetName()
}
func (handler *RpcHandler) IsSingleCoroutine() bool {
return handler.rpcHandler.IsSingleCoroutine()
}
func (handler *RpcHandler) CallWithTimeout(timeout time.Duration,serviceMethod string, args interface{}, reply interface{}) error {
return handler.callRpc(timeout,NodeIdNull, serviceMethod, args, reply)
}
@@ -593,12 +587,13 @@ func (handler *RpcHandler) CastGo(serviceMethod string, args interface{}) error
func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId string, rpcMethodId uint32, serviceName string, rawArgs []byte) error {
processor := GetProcessor(uint8(rpcProcessorType))
err, count := handler.funcRpcClient(nodeId, serviceName,false, handler.pClientList)
if count == 0 || err != nil {
pClientList := make([]*Client,0,1)
err, pClientList := handler.funcRpcClient(nodeId, serviceName,false, pClientList)
if len(pClientList) == 0 || err != nil {
log.Error("call serviceMethod is failed",log.ErrorAttr("error",err))
return err
}
if count > 1 {
if len(pClientList) > 1 {
err := errors.New("cannot call more then 1 node")
log.Error("cannot call more then 1 node",log.String("serviceName",serviceName))
return err
@@ -606,14 +601,14 @@ func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId s
//2. invoke via rpcClient
//if calling a service on the local node
for i := 0; i < count; i++ {
for i := 0; i < len(pClientList); i++ {
//cross-node call
pCall := handler.pClientList[i].RawGo(handler.pClientList[i].GetTargetNodeId(),DefaultRpcTimeout,handler.rpcHandler,processor, true, rpcMethodId, serviceName, rawArgs, nil)
pCall := pClientList[i].RawGo(pClientList[i].GetTargetNodeId(),DefaultRpcTimeout,handler.rpcHandler,processor, true, rpcMethodId, serviceName, rawArgs, nil)
if pCall.Err != nil {
err = pCall.Err
}
handler.pClientList[i].RemovePending(pCall.Seq)
pClientList[i].RemovePending(pCall.Seq)
ReleaseCall(pCall)
}

View File

@@ -14,11 +14,14 @@ import (
"sync"
"sync/atomic"
"github.com/duanhf2012/origin/v2/concurrent"
"encoding/json"
)
var timerDispatcherLen = 100000
var maxServiceEventChannelNum = 2000000
type IService interface {
concurrent.IConcurrent
Init(iService IService,getClientFun rpc.FuncRpcClient,getServerFun rpc.FuncRpcServer,serviceCfg interface{})
@@ -55,6 +58,7 @@ type Service struct {
serviceCfg interface{}
goroutineNum int32
startStatus bool
isRelease int32
retire int32
eventProcessor event.IEventProcessor
profiler *profiler.Profiler //performance profiler
@@ -145,6 +149,7 @@ func (s *Service) Init(iService IService,getClientFun rpc.FuncRpcClient,getServe
func (s *Service) Start() {
s.startStatus = true
atomic.StoreInt32(&s.isRelease,0)
var waitRun sync.WaitGroup
for i:=int32(0);i< s.goroutineNum;i++{
@@ -173,6 +178,7 @@ func (s *Service) Run() {
select {
case <- s.closeSig:
bStop = true
s.Release()
concurrent.Close()
case cb:=<-concurrentCBChannel:
concurrent.DoCallback(cb)
@@ -245,10 +251,6 @@ func (s *Service) Run() {
}
if bStop == true {
if atomic.AddInt32(&s.goroutineNum,-1)<=0 {
s.startStatus = false
s.Release()
}
break
}
}
@@ -271,8 +273,11 @@ func (s *Service) Release(){
log.Dump(string(buf[:l]),log.String("error",errString))
}
}()
s.self.OnRelease()
if atomic.AddInt32(&s.isRelease,-1) == -1{
s.self.OnRelease()
}
}
func (s *Service) OnRelease(){
@@ -293,6 +298,24 @@ func (s *Service) GetServiceCfg()interface{}{
return s.serviceCfg
}
func (s *Service) ParseServiceCfg(cfg interface{}) error{
if s.serviceCfg == nil {
return errors.New("no service configuration found")
}
rv := reflect.ValueOf(s.serviceCfg)
if rv.Kind() == reflect.Ptr && rv.IsNil() {
return errors.New("no service configuration found")
}
bytes,err := json.Marshal(s.serviceCfg)
if err != nil {
return err
}
return json.Unmarshal(bytes,cfg)
}
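
ParseServiceCfg mirrors ParseGlobalCfg at the service level: it re-marshals the raw service configuration to JSON and unmarshals it into a caller-defined struct. A minimal sketch with a hypothetical config shape:

package example

import "github.com/duanhf2012/origin/v2/service"

// RankCfg is a hypothetical shape for this service's config section; the field
// name is a placeholder, not defined in this changeset.
type RankCfg struct {
	TopN int `json:"TopN"`
}

type RankService struct {
	service.Service
	cfg RankCfg
}

func (rs *RankService) OnInit() error {
	// ParseServiceCfg re-marshals the raw service config to JSON and fills rs.cfg
	return rs.ParseServiceCfg(&rs.cfg)
}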
func (s *Service) GetProfiler() *profiler.Profiler{
return s.profiler
}
@@ -305,10 +328,6 @@ func (s *Service) UnRegEventReceiverFunc(eventType event.EventType, receiver eve
s.eventProcessor.UnRegEventReceiverFun(eventType, receiver)
}
func (s *Service) IsSingleCoroutine() bool {
return s.goroutineNum == 1
}
func (s *Service) RegRawRpc(rpcMethodId uint32,rawRpcCB rpc.RawRpcCallBack){
s.rpcHandler.RegRawRpc(rpcMethodId,rawRpcCB)
}
@@ -368,12 +387,12 @@ func (s *Service) UnRegNatsConnListener() {
func (s *Service) RegDiscoverListener(discoveryServiceListener rpc.IDiscoveryServiceListener) {
s.discoveryServiceLister = discoveryServiceListener
s.RegEventReceiverFunc(event.Sys_Event_DiscoverService,s.GetEventHandler(),s.OnDiscoverServiceEvent)
RegDiscoveryServiceEventFun(s.GetName())
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegDiscoverListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_DiscoverService,s.GetEventHandler())
UnRegDiscoveryServiceEventFun(s.GetName())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) PushRpcRequest(rpcRequest *rpc.RpcRequest) error{

View File

@@ -14,9 +14,6 @@ type RegDiscoveryServiceEventFunType func(serviceName string)
var RegRpcEventFun RegRpcEventFunType
var UnRegRpcEventFun RegRpcEventFunType
var RegDiscoveryServiceEventFun RegDiscoveryServiceEventFunType
var UnRegDiscoveryServiceEventFun RegDiscoveryServiceEventFunType
func init(){
mapServiceName = map[string]IService{}
setupServiceList = []IService{}
@@ -26,7 +23,7 @@ func Init() {
for _,s := range setupServiceList {
err := s.OnInit()
if err != nil {
log.SError("Failed to initialize "+s.GetName()+" service:"+err.Error())
log.Error("Failed to initialize "+s.GetName()+" service",log.ErrorAttr("err",err))
os.Exit(1)
}
}

View File

@@ -2,7 +2,6 @@ package ginmodule
import (
"context"
"datacenter/common/processor"
"github.com/duanhf2012/origin/v2/event"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
@@ -10,34 +9,36 @@ import (
"log/slog"
"net/http"
"strings"
"time"
"io"
)
type IGinProcessor interface {
Process(data *gin.Context) (*gin.Context, error)
}
type GinModule struct {
service.Module
*GinConf
*gin.Engine
srv *http.Server
processor []processor.IGinProcessor
listenAddr string
handleTimeout time.Duration
processor []IGinProcessor
}
type GinConf struct {
Addr string
}
const Sys_Event_Gin_Event event.EventType = -11
func (gm *GinModule) Init(conf *GinConf, engine *gin.Engine) {
gm.GinConf = conf
func (gm *GinModule) Init(addr string, handleTimeout time.Duration,engine *gin.Engine) {
gm.listenAddr = addr
gm.handleTimeout = handleTimeout
gm.Engine = engine
}
func (gm *GinModule) SetupDataProcessor(processor ...processor.IGinProcessor) {
func (gm *GinModule) SetupDataProcessor(processor ...IGinProcessor) {
gm.processor = processor
}
func (gm *GinModule) AppendDataProcessor(processor ...processor.IGinProcessor) {
func (gm *GinModule) AppendDataProcessor(processor ...IGinProcessor) {
gm.processor = append(gm.processor, processor...)
}
@@ -47,27 +48,28 @@ func (gm *GinModule) OnInit() error {
}
gm.srv = &http.Server{
Addr: gm.Addr,
Addr: gm.listenAddr,
Handler: gm.Engine,
}
gm.Engine.Use(Logger())
gm.Engine.Use(gin.Recovery())
gm.GetEventProcessor().RegEventReceiverFunc(Sys_Event_Gin_Event, gm.GetEventHandler(), gm.eventHandler)
gm.GetEventProcessor().RegEventReceiverFunc(event.Sys_Event_Gin_Event, gm.GetEventHandler(), gm.eventHandler)
return nil
}
func (gm *GinModule) eventHandler(ev event.IEvent) {
ginEvent := ev.(*GinEvent)
for _, handler := range ginEvent.handlersChain {
handler(ginEvent.c)
handler(&ginEvent.c)
}
ginEvent.chanWait <- struct{}{}
//ginEvent.chanWait <- struct{}{}
}
func (gm *GinModule) Start() {
log.Info("http start listen", slog.Any("addr", gm.Addr))
gm.srv.Addr = gm.listenAddr
log.Info("http start listen", slog.Any("addr", gm.listenAddr))
go func() {
err := gm.srv.ListenAndServe()
if err != nil {
@@ -77,7 +79,7 @@ func (gm *GinModule) Start() {
}
func (gm *GinModule) StartTLS(certFile, keyFile string) {
log.Info("http start listen", slog.Any("addr", gm.Addr))
log.Info("http start listen", slog.Any("addr", gm.listenAddr))
go func() {
err := gm.srv.ListenAndServeTLS(certFile, keyFile)
if err != nil {
@@ -92,17 +94,102 @@ func (gm *GinModule) Stop(ctx context.Context) {
}
}
type GinEvent struct {
handlersChain gin.HandlersChain
type SafeContext struct {
*gin.Context
chanWait chan struct{}
c *gin.Context
}
func (c *SafeContext) JSONAndDone(code int, obj any) {
c.Context.JSON(code,obj)
c.Done()
}
func (c *SafeContext) AsciiJSONAndDone(code int, obj any){
c.Context.AsciiJSON(code,obj)
c.Done()
}
func (c *SafeContext) PureJSONAndDone(code int, obj any){
c.Context.PureJSON(code,obj)
c.Done()
}
func (c *SafeContext) XMLAndDone(code int, obj any){
c.Context.XML(code,obj)
c.Done()
}
func (c *SafeContext) YAMLAndDone(code int, obj any){
c.Context.YAML(code,obj)
c.Done()
}
func (c *SafeContext) TOMLAndDone(code int, obj any){
c.Context.TOML(code,obj)
c.Done()
}
func (c *SafeContext) ProtoBufAndDone(code int, obj any){
c.Context.ProtoBuf(code,obj)
c.Done()
}
func (c *SafeContext) StringAndDone(code int, format string, values ...any){
c.Context.String(code,format,values...)
c.Done()
}
func (c *SafeContext) RedirectAndDone(code int, location string){
c.Context.Redirect(code,location)
c.Done()
}
func (c *SafeContext) DataAndDone(code int, contentType string, data []byte){
c.Context.Data(code,contentType,data)
c.Done()
}
func (c *SafeContext) DataFromReaderAndDone(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string){
c.DataFromReader(code,contentLength,contentType,reader,extraHeaders)
c.Done()
}
func (c *SafeContext) HTMLAndDone(code int, name string, obj any){
c.Context.HTML(code,name,obj)
c.Done()
}
func (c *SafeContext) IndentedJSONAndDone(code int, obj any){
c.Context.IndentedJSON(code,obj)
c.Done()
}
func (c *SafeContext) SecureJSONAndDone(code int, obj any){
c.Context.SecureJSON(code,obj)
c.Done()
}
func (c *SafeContext) JSONPAndDone(code int, obj any){
c.Context.JSONP(code,obj)
c.Done()
}
func (c *SafeContext) Done(){
c.chanWait <- struct{}{}
}
type GinEvent struct {
handlersChain []SafeHandlerFunc
c SafeContext
}
type SafeHandlerFunc func(*SafeContext)
func (ge *GinEvent) GetEventType() event.EventType {
return Sys_Event_Gin_Event
return event.Sys_Event_Gin_Event
}
func (gm *GinModule) handleMethod(httpMethod, relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
func (gm *GinModule) handleMethod(httpMethod, relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.Engine.Handle(httpMethod, relativePath, func(c *gin.Context) {
for _, p := range gm.processor {
_, err := p.Process(c)
@@ -112,33 +199,71 @@ func (gm *GinModule) handleMethod(httpMethod, relativePath string, handlers ...g
}
var ev GinEvent
chanWait := make(chan struct{})
ev.chanWait = chanWait
chanWait := make(chan struct{},2)
ev.c.chanWait = chanWait
ev.handlersChain = handlers
ev.c = c
ev.c.Context = c
gm.NotifyEvent(&ev)
<-chanWait
ctx,cancel := context.WithTimeout(context.Background(), gm.handleTimeout)
defer cancel()
select{
case <-ctx.Done():
log.Error("GinModule process timeout", slog.Any("path", c.Request.URL.Path))
c.AbortWithStatus(http.StatusRequestTimeout)
case <-chanWait:
}
})
}
func (gm *GinModule) SafeGET(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
// GET registers a handler that runs in the gin goroutine
func (gm *GinModule) GET(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.GET(relativePath, handlers...)
}
// POST registers a handler that runs in the gin goroutine
func (gm *GinModule) POST(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.POST(relativePath, handlers...)
}
// DELETE registers a handler that runs in the gin goroutine
func (gm *GinModule) DELETE(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.DELETE(relativePath, handlers...)
}
// PATCH registers a handler that runs in the gin goroutine
func (gm *GinModule) PATCH(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.PATCH(relativePath, handlers...)
}
// Put registers a handler that runs in the gin goroutine
func (gm *GinModule) Put(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
return gm.Engine.PUT(relativePath, handlers...)
}
// SafeGET registers a handler that runs in the service goroutine
func (gm *GinModule) SafeGET(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodGet, relativePath, handlers...)
}
func (gm *GinModule) SafePOST(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
// SafePOST registers a handler that runs in the service goroutine
func (gm *GinModule) SafePOST(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPost, relativePath, handlers...)
}
func (gm *GinModule) SafeDELETE(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
// SafeDELETE registers a handler that runs in the service goroutine
func (gm *GinModule) SafeDELETE(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodDelete, relativePath, handlers...)
}
func (gm *GinModule) SafePATCH(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
// SafePATCH registers a handler that runs in the service goroutine
func (gm *GinModule) SafePATCH(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPatch, relativePath, handlers...)
}
func (gm *GinModule) SafePut(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
// SafePut registers a handler that runs in the service goroutine
func (gm *GinModule) SafePut(relativePath string, handlers ...SafeHandlerFunc) gin.IRoutes {
return gm.handleMethod(http.MethodPut, relativePath, handlers...)
}
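A minimal usage sketch of the reworked module (package qualifiers and imports omitted for brevity); demoHttpService, the address, route, and timeout are illustrative, and the AddModule call assumes the usual origin service wiring:
// demoHttpService is illustrative only and not part of this change.
type demoHttpService struct {
	service.Service
	gin GinModule
}
func (d *demoHttpService) OnInit() error {
	// New Init signature: listen address, per-request handle timeout, gin engine.
	d.gin.Init(":9300", 5*time.Second, gin.New())
	d.AddModule(&d.gin)
	// Safe* handlers run in the service goroutine; finish them with one of the
	// *AndDone helpers so the waiting gin goroutine is released before the timeout.
	d.gin.SafeGET("/ping", func(c *SafeContext) {
		c.JSONAndDone(http.StatusOK, gin.H{"msg": "pong"})
	})
	return nil
}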

View File

@@ -2,7 +2,7 @@ package ginmodule
import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/v2/log"
"github.com/gin-gonic/gin"
"time"
)

View File

@@ -0,0 +1,65 @@
package kafkamodule
import "github.com/IBM/sarama"
type KafkaAdmin struct {
sarama.ClusterAdmin
mapTopic map[string]sarama.TopicDetail
}
func (ka *KafkaAdmin) Setup(kafkaVersion string, addrs []string) error {
config := sarama.NewConfig()
var err error
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
if err != nil {
return err
}
ka.ClusterAdmin, err = sarama.NewClusterAdmin(addrs, config)
if err != nil {
return err
}
ka.mapTopic, err = ka.GetTopics()
if err != nil {
ka.ClusterAdmin.Close()
return err
}
return nil
}
func (ka *KafkaAdmin) RefreshTopic() error {
var err error
ka.mapTopic, err = ka.GetTopics()
return err
}
func (ka *KafkaAdmin) HasTopic(topic string) bool {
_, ok := ka.mapTopic[topic]
return ok
}
func (ka *KafkaAdmin) GetTopicDetail(topic string) *sarama.TopicDetail {
topicDetail, ok := ka.mapTopic[topic]
if ok == false {
return nil
}
return &topicDetail
}
func (ka *KafkaAdmin) GetTopics() (map[string]sarama.TopicDetail, error) {
return ka.ListTopics()
}
// CreateTopic creates a topic.
// numPartitions: number of partitions
// replicationFactor: number of replicas
// validateOnly: when true, only validates the parameters without actually creating the topic
func (ka *KafkaAdmin) CreateTopic(topic string, numPartitions int32, replicationFactor int16, validateOnly bool) error {
return ka.ClusterAdmin.CreateTopic(topic, &sarama.TopicDetail{NumPartitions: numPartitions, ReplicationFactor: replicationFactor}, validateOnly)
}
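A brief sketch of ensuring a topic exists before producing; the broker list, topic name, and partition/replica counts are placeholders:
// ensureTopic is illustrative only.
func ensureTopic() error {
	var admin KafkaAdmin
	if err := admin.Setup("3.3.1", []string{"127.0.0.1:9092"}); err != nil {
		return err
	}
	defer admin.Close()
	if !admin.HasTopic("player_event") {
		// 4 partitions, replication factor 2; validateOnly=false performs the creation.
		return admin.CreateTopic("player_event", 4, 2, false)
	}
	return nil
}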

View File

@@ -0,0 +1,289 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"sync"
"time"
)
type ConsumerGroup struct {
sarama.ConsumerGroup
waitGroup sync.WaitGroup
chanExit chan error
ready chan bool
cancel context.CancelFunc
groupId string
}
func NewConsumerConfig(kafkaVersion string, assignor string, offsetsInitial int64) (*sarama.Config, error) {
var err error
config := sarama.NewConfig()
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
config.Consumer.Offsets.Initial = offsetsInitial
config.Consumer.Offsets.AutoCommit.Enable = false
switch assignor {
case "sticky":
// Sticky round-robin: after a rebalance the previous assignments are preserved as far as possible and extra partitions are peeled off the tail.
// topic: T0{P0,P1,P2,P3,P4,P5}, consumers: C1, C2
// --------------- before rebalance (i.e. plain round-robin):
// C1: T0{P0} T0{P2} T0{P4}
// C2: T0{P1} T0{P3} T0{P5}
// ---------------- after rebalance (one consumer added):
// C1: T0{P0} T0{P2}
// C2: T0{P1} T0{P3}
// C3: T0{P4} T0{P5} -- until the partition counts per consumer differ by at most 1
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()}
case "roundrobin":
// Round-robin: partitions are handed out one by one, evenly.
// topics: T0{P0,P1,P2}, T1{P0,P1,P2,P3}; two consumers C1, C2
// C1: T0{P0} T0{P2} T1{P1} T1{P3}
// C2: T0{P1} T1{P0} T1{P2}
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()}
case "range":
// Range (the default): each topic's partitions are split in one contiguous pass.
// topics: T0{P0,P1,P2,P3,P4,P5}, T1{P0,P1,P2,P3}; two consumers C1, C2
// T0 has 6 partitions / 2 consumers = 3, so each consumer gets 3 of its partitions
// T1 has 4 partitions / 2 consumers = 2, so each consumer gets 2 of its partitions
// C1: T0{P0, P1, P2} T1{P0, P1}
// C2: T0{P3, P4, P5} T1{P2, P3}
config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()}
default:
return nil, fmt.Errorf("Unrecognized consumer group partition assignor: %s", assignor)
}
if err != nil {
return nil, err
}
return config, nil
}
type IMsgReceiver interface {
Receiver(msgs []*sarama.ConsumerMessage) bool
}
func (c *ConsumerGroup) Setup(addr []string, topics []string, groupId string, config *sarama.Config, receiverInterval time.Duration, maxReceiverNum int, msgReceiver IMsgReceiver) error {
var err error
c.ConsumerGroup, err = sarama.NewConsumerGroup(addr, groupId, config)
if err != nil {
return err
}
c.groupId = groupId
c.chanExit = make(chan error, 1)
var handler ConsumerGroupHandler
handler.receiver = msgReceiver
handler.maxReceiverNum = maxReceiverNum
handler.receiverInterval = receiverInterval
handler.chanExit = c.chanExit
var ctx context.Context
ctx, c.cancel = context.WithCancel(context.Background())
c.waitGroup.Add(1)
go func() {
defer c.waitGroup.Done()
for {
if err = c.Consume(ctx, topics, &handler); err != nil {
// if the consumer setup fails, the error is returned here
log.Error("Error from consumer", log.Any("err", err))
return
}
// check if context was cancelled, signaling that the consumer should stop
if ctx.Err() != nil {
log.Info("consumer stop", log.Any("info", ctx.Err()))
}
c.chanExit <- err
}
}()
err = <-c.chanExit
// at this point the consumer session has been set up
return err
}
func (c *ConsumerGroup) Close() {
log.Info("close consumerGroup")
//1. cancel the consume context
c.cancel()
//2. close the connection
err := c.ConsumerGroup.Close()
if err != nil {
log.Error("close consumerGroup fail", log.Any("err", err.Error()))
}
//3. wait for the consume goroutine to exit
c.waitGroup.Wait()
}
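For reference, a consumption sketch under assumed values (brokers, topic, group id, flush interval, batch size); Receiver returning true acknowledges the batch, false makes the flush retry it:
// demoReceiver is illustrative only.
type demoReceiver struct{}
func (r *demoReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
	for _, m := range msgs {
		log.Info("kafka msg", log.Any("topic", m.Topic), log.Any("offset", m.Offset))
	}
	return true
}
func runConsumer() error {
	cfg, err := NewConsumerConfig("3.3.1", "sticky", sarama.OffsetOldest)
	if err != nil {
		return err
	}
	var cg ConsumerGroup
	// Flush to the receiver every second or every 100 messages, whichever comes first.
	if err = cg.Setup([]string{"127.0.0.1:9092"}, []string{"player_event"}, "demo_group", cfg, time.Second, 100, &demoReceiver{}); err != nil {
		return err
	}
	// ... run until shutdown, then:
	cg.Close()
	return nil
}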
type ConsumerGroupHandler struct {
receiver IMsgReceiver
receiverInterval time.Duration
maxReceiverNum int
//mapTopicOffset map[string]map[int32]int //map[topic]map[partitionId]offsetInfo
mapTopicData map[string]*MsgData
mx sync.Mutex
chanExit chan error
isRebalance bool //whether this setup call is a rebalance
//stopSig *int32
}
type MsgData struct {
sync.Mutex
msg []*sarama.ConsumerMessage
mapPartitionOffset map[int32]int64
}
func (ch *ConsumerGroupHandler) Flush(session sarama.ConsumerGroupSession, topic string) {
if topic != "" {
msgData := ch.GetMsgData(topic)
msgData.flush(session, ch.receiver, topic)
return
}
for tp, msgData := range ch.mapTopicData {
msgData.flush(session, ch.receiver, tp)
}
}
func (ch *ConsumerGroupHandler) GetMsgData(topic string) *MsgData {
ch.mx.Lock()
defer ch.mx.Unlock()
msgData := ch.mapTopicData[topic]
if msgData == nil {
msgData = &MsgData{}
msgData.msg = make([]*sarama.ConsumerMessage, 0, ch.maxReceiverNum)
ch.mapTopicData[topic] = msgData
}
return msgData
}
func (md *MsgData) flush(session sarama.ConsumerGroupSession, receiver IMsgReceiver, topic string) {
if len(md.msg) == 0 {
return
}
//hand the batch to the receiver
for {
ok := receiver.Receiver(md.msg)
if ok == true {
break
}
}
for pId, offset := range md.mapPartitionOffset {
session.MarkOffset(topic, pId, offset+1, "")
log.Info(fmt.Sprintf("topic %s,pid %d,offset %d", topic, pId, offset+1))
}
session.Commit()
//log.Info("commit")
//time.Sleep(1000 * time.Second)
//reset the batch
md.msg = md.msg[:0]
clear(md.mapPartitionOffset)
}
func (md *MsgData) appendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage, receiver IMsgReceiver, maxReceiverNum int) {
md.Lock()
defer md.Unlock()
//received offsets only ever increase
if md.mapPartitionOffset == nil {
md.mapPartitionOffset = make(map[int32]int64, 10)
}
md.mapPartitionOffset[msg.Partition] = msg.Offset
md.msg = append(md.msg, msg)
if len(md.msg) < maxReceiverNum {
return
}
md.flush(session, receiver, msg.Topic)
}
func (ch *ConsumerGroupHandler) AppendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
dataMsg := ch.GetMsgData(msg.Topic)
dataMsg.appendMsg(session, msg, ch.receiver, ch.maxReceiverNum)
}
func (ch *ConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error {
ch.mapTopicData = make(map[string]*MsgData, 128)
if ch.isRebalance == false {
ch.chanExit <- nil
}
ch.isRebalance = true
return nil
}
func (ch *ConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error {
ch.Flush(session, "")
return nil
}
func (ch *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
ticker := time.NewTicker(ch.receiverInterval)
for {
select {
case msg := <-claim.Messages():
if msg == nil {
log.SWarning("claim will exit", log.Any("topic", claim.Topic()), log.Any("Partition", claim.Partition()))
return nil
}
ch.AppendMsg(session, msg)
case <-ticker.C:
ch.Flush(session, claim.Topic())
case <-session.Context().Done():
return nil
}
}
}
/*
Aliyun parameter notes: https://sls.aliyun.com/doc/oscompatibledemo/sarama_go_kafka_consume.html
conf.Net.TLS.Enable = true
conf.Net.SASL.Enable = true
conf.Net.SASL.User = project
conf.Net.SASL.Password = fmt.Sprintf("%s#%s", accessId, accessKey)
conf.Net.SASL.Mechanism = "PLAIN"
conf.Consumer.Fetch.Min = 1
conf.Consumer.Fetch.Default = 1024 * 1024
conf.Consumer.Retry.Backoff = 2 * time.Second
conf.Consumer.MaxWaitTime = 250 * time.Millisecond
conf.Consumer.MaxProcessingTime = 100 * time.Millisecond
conf.Consumer.Return.Errors = false
conf.Consumer.Offsets.AutoCommit.Enable = true
conf.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
conf.Consumer.Offsets.Initial = sarama.OffsetOldest
conf.Consumer.Offsets.Retry.Max = 3
*/

View File

@@ -0,0 +1,146 @@
package kafkamodule
import (
"context"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"time"
)
type IProducer interface {
}
type SyncProducer struct {
}
type AsyncProducer struct {
}
type Producer struct {
service.Module
sarama.SyncProducer
sarama.AsyncProducer
}
// NewProducerConfig builds a producer config.
// kafkaVersion: the Kafka broker version
// returnErr/returnSucc: whether errors and successes are delivered on the result channels
// requiredAcks: -1 (WaitForAll) waits for all in-sync replicas, strongest guarantee;
//               1 (WaitForLocal, the default) waits only for the leader;
//               0 (NoResponse) does not wait at all, highest throughput
// Idempotent: when true the producer guarantees messages are written exactly once and in order (idempotent producer)
// partitioner: picks the partition to send to; by default the message key is hashed
func NewProducerConfig(kafkaVersion string, returnErr bool, returnSucc bool, requiredAcks sarama.RequiredAcks, Idempotent bool,
partitioner sarama.PartitionerConstructor) (*sarama.Config, error) {
config := sarama.NewConfig()
var err error
config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
if err != nil {
return nil, err
}
config.Producer.Return.Errors = returnErr
config.Producer.Return.Successes = returnSucc
config.Producer.RequiredAcks = requiredAcks
config.Producer.Partitioner = partitioner
config.Producer.Timeout = 10 * time.Second
config.Producer.Idempotent = Idempotent
if Idempotent == true {
config.Net.MaxOpenRequests = 1
}
return config, nil
}
func (p *Producer) SyncSetup(addr []string, config *sarama.Config) error {
var err error
p.SyncProducer, err = sarama.NewSyncProducer(addr, config)
if err != nil {
return err
}
return nil
}
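A sync-producer sketch with placeholder brokers and topic, waiting for all in-sync replicas and hashing the key to pick the partition:
// produceSync is illustrative only.
func produceSync() error {
	cfg, err := NewProducerConfig("3.3.1", true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
	if err != nil {
		return err
	}
	var p Producer
	if err = p.SyncSetup([]string{"127.0.0.1:9092"}, cfg); err != nil {
		return err
	}
	defer p.Close()
	partition, offset, err := p.SendMessage(&sarama.ProducerMessage{
		Topic: "player_event",
		Key:   sarama.StringEncoder("player_1001"),
		Value: sarama.StringEncoder(`{"event":"login"}`),
	})
	if err != nil {
		return err
	}
	log.Info("sent", log.Any("partition", partition), log.Any("offset", offset))
	return nil
}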
func (p *Producer) ASyncSetup(addr []string, config *sarama.Config) error {
var err error
p.AsyncProducer, err = sarama.NewAsyncProducer(addr, config)
if err != nil {
return err
}
go func() {
p.asyncRun()
}()
return nil
}
func (p *Producer) asyncRun() {
for {
select {
case sm := <-p.Successes():
if sm.Metadata == nil {
break
}
asyncReturn := sm.Metadata.(*AsyncReturn)
asyncReturn.chanReturn <- asyncReturn
case em := <-p.Errors():
log.Error("async kafkamodule error", log.ErrorAttr("err", em.Err))
if em.Msg.Metadata == nil {
break
}
asyncReturn := em.Msg.Metadata.(*AsyncReturn)
asyncReturn.Err = em.Err
asyncReturn.chanReturn <- asyncReturn
}
}
}
type AsyncReturn struct {
Msg *sarama.ProducerMessage
Err error
chanReturn chan *AsyncReturn
}
func (ar *AsyncReturn) WaitOk(ctx context.Context) (*sarama.ProducerMessage, error) {
asyncReturn := ar.Msg.Metadata.(*AsyncReturn)
select {
case <-asyncReturn.chanReturn:
return asyncReturn.Msg, asyncReturn.Err
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (p *Producer) AsyncSendMessage(msg *sarama.ProducerMessage) *AsyncReturn {
asyncReturn := AsyncReturn{Msg: msg, chanReturn: make(chan *AsyncReturn, 1)}
msg.Metadata = &asyncReturn
p.AsyncProducer.Input() <- msg
return &asyncReturn
}
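And the asynchronous path: AsyncSendMessage attaches the AsyncReturn to the message metadata, and WaitOk blocks until asyncRun feeds the broker's success or error back through the channel. A sketch, assuming the producer was set up via ASyncSetup with Return.Successes/Errors enabled:
// produceAsync is illustrative only; the 3-second deadline is a placeholder.
func produceAsync(p *Producer, msg *sarama.ProducerMessage) error {
	ret := p.AsyncSendMessage(msg)
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_, err := ret.WaitOk(ctx) // nil error means the broker acknowledged the message
	return err
}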
func (p *Producer) AsyncPushMessage(msg *sarama.ProducerMessage) {
p.AsyncProducer.Input() <- msg
}
func (p *Producer) Close() {
if p.SyncProducer != nil {
p.SyncProducer.Close()
p.SyncProducer = nil
}
if p.AsyncProducer != nil {
p.AsyncProducer.Close()
p.AsyncProducer = nil
}
}
func (p *Producer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
return p.SyncProducer.SendMessage(msg)
}
func (p *Producer) SendMessages(msgs []*sarama.ProducerMessage) error {
return p.SyncProducer.SendMessages(msgs)
}

View File

@@ -0,0 +1,151 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"testing"
"time"
)
// Notes on the parameters and mechanism names: https://blog.csdn.net/u013311345/article/details/129217728
type MsgReceiver struct {
t *testing.T
}
func (mr *MsgReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
for _, m := range msgs {
mr.t.Logf("time:%s, topic:%s, partition:%d, offset:%d, key:%s, value:%s", time.Now().Format("2006-01-02 15:04:05.000"), m.Topic, m.Partition, m.Offset, m.Key, string(m.Value))
}
return true
}
var addr = []string{"192.168.13.24:9092", "192.168.13.24:9093", "192.168.13.24:9094", "192.168.13.24:9095"}
var topicName = []string{"test_topic_1", "test_topic_2"}
var kafkaVersion = "3.3.1"
func producer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 2, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.SyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
//msgs := make([]*sarama.ProducerMessage, 0, 20000)
for i := 0; i < 20000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
pd.SendMessage(&msg)
//msgs = append(msgs, &msg)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
t.Log(time.Now().Sub(now).Milliseconds())
pd.Close()
}
func producer_async(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 10, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.ASyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
msgs := make([]*AsyncReturn, 0, 20000)
for i := 0; i < 200000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
r := pd.AsyncSendMessage(&msg)
msgs = append(msgs, r)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
for _, r := range msgs {
r.WaitOk(context.Background())
//t.Log(m, e)
}
t.Log(time.Now().Sub(now).Milliseconds())
time.Sleep(1000 * time.Second)
pd.Close()
}
func consumer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 10, 2, false)
t.Log(err)
}
}
var cg ConsumerGroup
cfg, err := NewConsumerConfig(kafkaVersion, "sticky", sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
err = cg.Setup(addr, topicName, "test_groupId", cfg, 50*time.Second, 10, &MsgReceiver{t: t})
t.Log(err)
time.Sleep(10000 * time.Second)
cg.Close()
}
func TestConsumerAndProducer(t *testing.T) {
producer_async(t)
//go producer(t)
//producer(t)
//consumer(t)
}

View File

@@ -0,0 +1,7 @@
package kafkamodule
type Sasl struct {
UserName string `json:"UserName"`
Passwd string `json:"Passwd"`
InstanceId string `json:"InstanceId"`
}
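The struct only carries credentials; a sketch of applying them to a sarama config, following the Aliyun notes in the consumer file (the PLAIN mechanism and TLS flags are assumptions, and InstanceId is left unused here):
// applySasl is illustrative only.
func applySasl(cfg *sarama.Config, s Sasl) {
	cfg.Net.TLS.Enable = true
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext // "PLAIN"
	cfg.Net.SASL.User = s.UserName
	cfg.Net.SASL.Password = s.Passwd
}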

View File

@@ -5,7 +5,6 @@ import (
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/bsonx"
"time"
)
@@ -48,6 +47,10 @@ func (mm *MongoModule) Start() error {
return nil
}
func (mm *MongoModule) Stop() error {
return mm.client.Disconnect(context.Background())
}
func (mm *MongoModule) TakeSession() Session {
return Session{Client: mm.client, maxOperatorTimeOut: mm.maxOperatorTimeOut}
}
@@ -86,12 +89,12 @@ func (s *Session) EnsureUniqueIndex(db string, collection string, indexKeys [][]
func (s *Session) ensureIndex(db string, collection string, indexKeys [][]string, bBackground bool, unique bool, sparse bool, asc bool) error {
var indexes []mongo.IndexModel
for _, keys := range indexKeys {
keysDoc := bsonx.Doc{}
keysDoc := bson.D{}
for _, key := range keys {
if asc {
keysDoc = keysDoc.Append(key, bsonx.Int32(1))
keysDoc = append(keysDoc, bson.E{Key:key,Value:1})
} else {
keysDoc = keysDoc.Append(key, bsonx.Int32(-1))
keysDoc = append(keysDoc, bson.E{Key:key,Value:-1})
}
}
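With bsonx gone, index keys are plain bson.D documents; for reference, the equivalent construction with the current driver API (field names are placeholders):
keys := bson.D{{Key: "playerId", Value: 1}, {Key: "createTime", Value: -1}}
indexModel := mongo.IndexModel{
	Keys:    keys,
	Options: options.Index().SetUnique(true).SetSparse(false),
}
// the models are then created through the collection's Indexes() view inside ensureIndex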

View File

@@ -16,7 +16,7 @@ type CustomerSubscriber struct {
rpc.IRpcHandler
topic string
subscriber *Subscriber
fromNodeId int
fromNodeId string
callBackRpcMethod string
serviceName string
StartIndex uint64
@@ -37,7 +37,7 @@ const (
MethodLast SubscribeMethod = 1 //Last mode: subscribe from the position this consumer last recorded
)
func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId int, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId string, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
cs.subscriber = ss
cs.fromNodeId = fromNodeId
cs.callBackRpcMethod = callBackRpcMethod
@@ -85,7 +85,7 @@ func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandle
}
// Subscribe starts the subscription
func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId int, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId string, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
err := cs.trySetSubscriberBaseInfo(rpcHandler, ss, topic, subscribeMethod, customerId, fromNodeId, callBackRpcMethod, startIndex, oneBatchQuantity)
if err != nil {
return err

View File

@@ -122,5 +122,5 @@ func (ms *MessageQueueService) RPC_Publish(inParam *rpc.DBQueuePublishReq, outPa
func (ms *MessageQueueService) RPC_Subscribe(req *rpc.DBQueueSubscribeReq, res *rpc.DBQueueSubscribeRes) error {
topicRoom := ms.GetTopicRoom(req.TopicName)
return topicRoom.TopicSubscribe(ms.GetRpcHandler(), req.SubType, int32(req.Method), int(req.FromNodeId), req.RpcMethod, req.TopicName, req.CustomerId, req.StartIndex, req.OneBatchQuantity)
return topicRoom.TopicSubscribe(ms.GetRpcHandler(), req.SubType, int32(req.Method), req.FromNodeId, req.RpcMethod, req.TopicName, req.CustomerId, req.StartIndex, req.OneBatchQuantity)
}

View File

@@ -31,7 +31,7 @@ func (ss *Subscriber) PersistTopicData(topic string, topics []TopicData, retryCo
return ss.dataPersist.PersistTopicData(topic, topics, retryCount)
}
func (ss *Subscriber) TopicSubscribe(rpcHandler rpc.IRpcHandler, subScribeType rpc.SubscribeType, subscribeMethod SubscribeMethod, fromNodeId int, callBackRpcMethod string, topic string, customerId string, StartIndex uint64, oneBatchQuantity int32) error {
func (ss *Subscriber) TopicSubscribe(rpcHandler rpc.IRpcHandler, subScribeType rpc.SubscribeType, subscribeMethod SubscribeMethod, fromNodeId string, callBackRpcMethod string, topic string, customerId string, StartIndex uint64, oneBatchQuantity int32) error {
//unsubscribe request
if subScribeType == rpc.SubscribeType_Unsubscribe {
ss.UnSubscribe(customerId)

View File

@@ -263,7 +263,7 @@ func (mp *MongoPersist) JugeTimeoutSave() bool{
func (mp *MongoPersist) persistCoroutine(){
defer mp.waitGroup.Done()
for atomic.LoadInt32(&mp.stop)==0 || mp.hasPersistData(){
for atomic.LoadInt32(&mp.stop)==0 {
//sleep for the poll interval
time.Sleep(time.Second*1)

View File

@@ -107,12 +107,16 @@ func (tcpService *TcpService) TcpEventHandler(ev event.IEvent) {
case TPT_DisConnected:
tcpService.process.DisConnectedRoute(pack.ClientId)
case TPT_UnknownPack:
tcpService.process.UnknownMsgRoute(pack.ClientId,pack.Data)
tcpService.process.UnknownMsgRoute(pack.ClientId,pack.Data,tcpService.recyclerReaderBytes)
case TPT_Pack:
tcpService.process.MsgRoute(pack.ClientId,pack.Data)
tcpService.process.MsgRoute(pack.ClientId,pack.Data,tcpService.recyclerReaderBytes)
}
}
func (tcpService *TcpService) recyclerReaderBytes(data []byte) {
}
func (tcpService *TcpService) SetProcessor(process processor.IProcessor,handler event.IEventHandler){
tcpService.process = process
tcpService.RegEventReceiverFunc(event.Sys_Event_Tcp,handler, tcpService.TcpEventHandler)

View File

@@ -95,9 +95,9 @@ func (ws *WSService) WSEventHandler(ev event.IEvent) {
case WPT_DisConnected:
pack.MsgProcessor.DisConnectedRoute(pack.ClientId)
case WPT_UnknownPack:
pack.MsgProcessor.UnknownMsgRoute(pack.ClientId,pack.Data)
pack.MsgProcessor.UnknownMsgRoute(pack.ClientId,pack.Data,ws.recyclerReaderBytes)
case WPT_Pack:
pack.MsgProcessor.MsgRoute(pack.ClientId,pack.Data)
pack.MsgProcessor.MsgRoute(pack.ClientId,pack.Data,ws.recyclerReaderBytes)
}
}
@@ -129,7 +129,7 @@ func (slf *WSClient) Run() {
for{
bytes,err := slf.wsConn.ReadMsg()
if err != nil {
log.Debug("read client id %d is error:%+v",slf.id,err)
log.Debug("read client id %s is error:%+v",slf.id,err)
break
}
data,err:=slf.wsService.process.Unmarshal(slf.id,bytes)
@@ -153,7 +153,7 @@ func (ws *WSService) SendMsg(clientid string,msg interface{}) error{
client,ok := ws.mapClient[clientid]
if ok == false{
ws.mapClientLocker.Unlock()
return fmt.Errorf("client %d is disconnect!",clientid)
return fmt.Errorf("client %s is disconnect!",clientid)
}
ws.mapClientLocker.Unlock()
@@ -180,3 +180,5 @@ func (ws *WSService) Close(clientid string) {
return
}
func (ws *WSService) recyclerReaderBytes(data []byte) {
}