Mirror of https://github.com/duanhf2012/origin.git, synced 2026-02-07 01:04:41 +08:00
Compare commits
33 Commits
| Author | SHA1 | Date |
|---|---|---|
| | afb04cac7f | |
| | 975cf93d58 | |
| | c7e0fcbdbb | |
| | 5bea050f63 | |
| | 95b4e2f8de | |
| | 5601ab5ae2 | |
| | d28094eefa | |
| | 68dfbc46f0 | |
| | 80c73b0bdb | |
| | d9afeed6ee | |
| | a32ff59676 | |
| | 2101c8903c | |
| | 5214f094bf | |
| | fd364cf579 | |
| | 1eab31209c | |
| | 2da3ccae39 | |
| | da18cf3158 | |
| | c3484e9d5b | |
| | b87a78c85b | |
| | 17a448f75c | |
| | d87ad419c8 | |
| | 298a5d3721 | |
| | 64fb9368bf | |
| | 7f93aa5ff9 | |
| | 7a8d312aeb | |
| | f931f61f7b | |
| | 151ed123f4 | |
| | 5a6a4c8a0d | |
| | 280c04a5d7 | |
| | 1520dae223 | |
| | 84f3429564 | |
| | 89fd5d273b | |
| | 3ce873ef04 | |
12 README.md
@@ -56,7 +56,7 @@ cluster.json如下:
"ListenAddr":"127.0.0.1:8001",
"MaxRpcParamLen": 409600,
"NodeName": "Node_Test1",
"remark":"//以_打头的,表示只在本机进程,不对整个子网开发",
"remark":"//以_打头的,表示只在本机进程,不对整个子网公开",
"ServiceList": ["TestService1","TestService2","TestServiceCall","GateService","_TcpService","HttpService","WSService"]
},
{
@@ -65,7 +65,7 @@ cluster.json如下:
"ListenAddr":"127.0.0.1:8002",
"MaxRpcParamLen": 409600,
"NodeName": "Node_Test1",
"remark":"//以_打头的,表示只在本机进程,不对整个子网开发",
"remark":"//以_打头的,表示只在本机进程,不对整个子网公开",
"ServiceList": ["TestService1","TestService2","TestServiceCall","GateService","TcpService","HttpService","WSService"]
}
]
@@ -96,6 +96,7 @@ service.json如下:
"ReadTimeout":10000,
"WriteTimeout":10000,
"ProcessTimeout":10000,
"ManualStart": false,
"CAFile":[
{
"Certfile":"",
@@ -167,6 +168,7 @@ service.json如下:
* ReadTimeout:读网络超时毫秒
* WriteTimeout:写网络超时毫秒
* ProcessTimeout: 处理超时毫秒
* ManualStart: 是否手动控制开始监听,如果true,需要手动调用StartListen()函数
* CAFile: 证书文件,如果您的服务器通过web服务器代理配置https可以忽略该配置

**TcpService配置**
@@ -777,11 +779,11 @@ type TestHttpService struct {

func (slf *TestHttpService) OnInit() error {
//获取系统httpservice服务
httpervice := node.GetService("HttpService").(*sysservice.HttpService)
httpservice := node.GetService("HttpService").(*sysservice.HttpService)

//新建并设置路由对象
httpRouter := sysservice.NewHttpHttpRouter()
httpervice.SetHttpRouter(httpRouter,slf.GetEventHandler())
httpservice.SetHttpRouter(httpRouter,slf.GetEventHandler())

//GET方法,请求url:http://127.0.0.1:9402/get/query?nickname=boyce
//并header中新增key为uid,value为1000的头,则用postman测试返回结果为:
@@ -795,6 +797,8 @@ func (slf *TestHttpService) OnInit() error {
//GET方式获取目录下的资源,http://127.0.0.1:port/img/head/a.jpg
httpRouter.SetServeFile(sysservice.METHOD_GET,"/img/head/","d:/img")

//如果配置"ManualStart": true配置为true,则使用以下方法进行开启http监听
//httpservice.StartListen()
return nil
}
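The hunks above add a `ManualStart` switch to the HttpService configuration and a commented-out `StartListen()` call. A minimal sketch of how the two fit together, assuming the `sysservice.HttpService` API exactly as quoted in the README excerpt; the `service.Service` embedding and the import paths follow the framework's usual service pattern and are assumptions here:

```go
package main

import (
	"github.com/duanhf2012/origin/node"
	"github.com/duanhf2012/origin/service"
	"github.com/duanhf2012/origin/sysservice"
)

type TestHttpService struct {
	service.Service
}

func (slf *TestHttpService) OnInit() error {
	// Fetch the system HttpService configured in service.json.
	httpservice := node.GetService("HttpService").(*sysservice.HttpService)

	// Create a router and hand it to the HttpService together with this
	// service's event handler, as in the README example above.
	httpRouter := sysservice.NewHttpHttpRouter()
	httpservice.SetHttpRouter(httpRouter, slf.GetEventHandler())

	// With "ManualStart": true the listener is not opened automatically,
	// so start it once the routes are registered.
	httpservice.StartListen()
	return nil
}
```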
@@ -465,7 +465,7 @@ func GetNodeByServiceName(serviceName string) map[int]struct{} {
return nil
}

var mapNodeId map[int]struct{}
mapNodeId := map[int]struct{}{}
for nodeId,_ := range mapNode {
mapNodeId[nodeId] = struct{}{}
}

@@ -302,6 +302,11 @@ func (dc *DynamicDiscoveryClient) RPC_SubServiceDiscover(req *rpc.SubscribeDisco
for _, nodeInfo := range mapNodeInfo {
dc.addMasterNode(req.MasterNodeId, nodeInfo.NodeId)
dc.setNodeInfo(nodeInfo)

if len(nodeInfo.PublicServiceList) == 0 {
continue
}

cluster.TriggerDiscoveryEvent(true,int(nodeInfo.NodeId),nodeInfo.PublicServiceList)
}
@@ -5,7 +5,8 @@ import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/rpc"
jsoniter "github.com/json-iterator/go"
"io/ioutil"
"os"
"path/filepath"
"strings"
)

@@ -18,7 +19,7 @@ type NodeInfoList struct {

func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
c := &NodeInfoList{}
d, err := ioutil.ReadFile(filepath)
d, err := os.ReadFile(filepath)
if err != nil {
return nil, err
}
@@ -33,7 +34,7 @@ func (cls *Cluster) ReadClusterConfig(filepath string) (*NodeInfoList, error) {
func (cls *Cluster) readServiceConfig(filepath string) (interface{}, map[string]interface{}, map[int]map[string]interface{}, error) {
c := map[string]interface{}{}
//读取配置
d, err := ioutil.ReadFile(filepath)
d, err := os.ReadFile(filepath)
if err != nil {
return nil, nil, nil, err
}
@@ -69,7 +70,7 @@ func (cls *Cluster) readLocalClusterConfig(nodeId int) ([]NodeInfo, []NodeInfo,
var nodeInfoList []NodeInfo
var masterDiscoverNodeList []NodeInfo
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := ioutil.ReadDir(clusterCfgPath)
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return nil, nil, fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
}
@@ -111,49 +112,89 @@ func (cls *Cluster) readLocalClusterConfig(nodeId int) ([]NodeInfo, []NodeInfo,

func (cls *Cluster) readLocalService(localNodeId int) error {
clusterCfgPath := strings.TrimRight(configDir, "/") + "/cluster"
fileInfoList, err := ioutil.ReadDir(clusterCfgPath)
fileInfoList, err := os.ReadDir(clusterCfgPath)
if err != nil {
return fmt.Errorf("Read dir %s is fail :%+v", clusterCfgPath, err)
}

var globalCfg interface{}
publicService := map[string]interface{}{}
nodeService := map[string]interface{}{}

//读取任何文件,只读符合格式的配置,目录下的文件可以自定义分文件
for _, f := range fileInfoList {
if f.IsDir() == false {
filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
if f.IsDir() == true {
continue
}

if filepath.Ext(f.Name())!= ".json" {
continue
}

filePath := strings.TrimRight(strings.TrimRight(clusterCfgPath, "/"), "\\") + "/" + f.Name()
currGlobalCfg, serviceConfig, mapNodeService, err := cls.readServiceConfig(filePath)
if err != nil {
continue
if err != nil {
continue
}

if currGlobalCfg != nil {
//不允许重复的配置global配置
if globalCfg != nil {
return fmt.Errorf("[Global] does not allow repeated configuration in %s.",f.Name())
}
globalCfg = currGlobalCfg
}

if currGlobalCfg != nil {
cls.globalCfg = currGlobalCfg
}

for _, s := range cls.localNodeInfo.ServiceList {
for {
//取公共服务配置
pubCfg, ok := serviceConfig[s]
if ok == true {
cls.localServiceCfg[s] = pubCfg
//保存公共配置
for _, s := range cls.localNodeInfo.ServiceList {
for {
//取公共服务配置
pubCfg, ok := serviceConfig[s]
if ok == true {
if _,publicOk := publicService[s];publicOk == true {
return fmt.Errorf("public service [%s] does not allow repeated configuration in %s.",s,f.Name())
}
publicService[s] = pubCfg
}

//如果结点也配置了该服务,则覆盖之
nodeService, ok := mapNodeService[localNodeId]
if ok == false {
break
}
sCfg, ok := nodeService[s]
if ok == false {
break
}

cls.localServiceCfg[s] = sCfg
//取指定结点配置的服务
nodeServiceCfg,ok := mapNodeService[localNodeId]
if ok == false {
break
}
nodeCfg, ok := nodeServiceCfg[s]
if ok == false {
break
}

if _,nodeOK := nodeService[s];nodeOK == true {
return fmt.Errorf("NodeService NodeId[%d] Service[%s] does not allow repeated configuration in %s.",cls.localNodeInfo.NodeId,s,f.Name())
}
nodeService[s] = nodeCfg
break
}
}
}

//组合所有的配置
for _, s := range cls.localNodeInfo.ServiceList {
//先从NodeService中找
var serviceCfg interface{}
var ok bool
serviceCfg,ok = nodeService[s]
if ok == true {
cls.localServiceCfg[s] =serviceCfg
continue
}

//如果找不到从PublicService中找
serviceCfg,ok = publicService[s]
if ok == true {
cls.localServiceCfg[s] =serviceCfg
}
}
cls.globalCfg = globalCfg

return nil
}
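A recurring change in these hunks is the migration from the deprecated `io/ioutil` helpers to their `os`/`io` equivalents (`ioutil.ReadFile` to `os.ReadFile`, `ioutil.ReadDir` to `os.ReadDir`), which requires Go 1.16 or newer and lines up with the `go.mod` bump below. A small standalone sketch of the same migration; the file paths are illustrative:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Before: d, err := ioutil.ReadFile("config/cluster.json")
	d, err := os.ReadFile("config/cluster.json") // same []byte/error contract as ioutil.ReadFile
	if err != nil {
		fmt.Println("read file:", err)
		return
	}
	fmt.Println("read", len(d), "bytes")

	// Before: fileInfoList, err := ioutil.ReadDir("config/cluster")
	// Note: os.ReadDir returns []os.DirEntry instead of []os.FileInfo,
	// but Name() and IsDir(), which the cluster loader uses, exist on both.
	entries, err := os.ReadDir("config/cluster")
	if err != nil {
		fmt.Println("read dir:", err)
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}
```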
2 go.mod
@@ -1,6 +1,6 @@
module github.com/duanhf2012/origin

go 1.18
go 1.19

require (
github.com/go-sql-driver/mysql v1.6.0
@@ -2,6 +2,7 @@ package network

import (
"crypto/tls"
"errors"
"github.com/duanhf2012/origin/log"
"net/http"
"time"
@@ -37,6 +38,10 @@ func (slf *HttpServer) Start() {
}

func (slf *HttpServer) startListen() error {
if slf.httpServer != nil {
return errors.New("Duplicate start not allowed")
}

var tlsCaList []tls.Certificate
var tlsConfig *tls.Config
for _, caFile := range slf.caFileList {
@@ -13,6 +13,8 @@ type TCPClient struct {
ConnNum int
ConnectInterval time.Duration
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
AutoReconnect bool
NewAgent func(*TCPConn) Agent
cons ConnSet
@@ -52,6 +54,14 @@ func (client *TCPClient) init() {
client.PendingWriteNum = 1000
log.SRelease("invalid PendingWriteNum, reset to ", client.PendingWriteNum)
}
if client.ReadDeadline == 0 {
client.ReadDeadline = 15*time.Second
log.SRelease("invalid ReadDeadline, reset to ", int64(client.ReadDeadline.Seconds()),"s")
}
if client.WriteDeadline == 0 {
client.WriteDeadline = 15*time.Second
log.SRelease("invalid WriteDeadline, reset to ", int64(client.WriteDeadline.Seconds()),"s")
}
if client.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
}
@@ -69,6 +79,13 @@ func (client *TCPClient) init() {
client.msgParser = msgParser
}

func (client *TCPClient) GetCloseFlag() bool{
client.Lock()
defer client.Unlock()

return client.closeFlag
}

func (client *TCPClient) dial() net.Conn {
for {
conn, err := net.Dial("tcp", client.Addr)
@@ -93,7 +110,7 @@ reconnect:
if conn == nil {
return
}

client.Lock()
if client.closeFlag {
client.Unlock()
@@ -103,7 +120,7 @@ reconnect:
client.cons[conn] = struct{}{}
client.Unlock()

tcpConn := newTCPConn(conn, client.PendingWriteNum, client.msgParser)
tcpConn := newTCPConn(conn, client.PendingWriteNum, client.msgParser,client.WriteDeadline)
agent := client.NewAgent(tcpConn)
agent.Run()
@@ -27,7 +27,7 @@ func freeChannel(conn *TCPConn){
}
}

func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser) *TCPConn {
func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser,writeDeadline time.Duration) *TCPConn {
tcpConn := new(TCPConn)
tcpConn.conn = conn
tcpConn.writeChan = make(chan []byte, pendingWriteNum)
@@ -37,6 +37,8 @@ func newTCPConn(conn net.Conn, pendingWriteNum int, msgParser *MsgParser) *TCPCo
if b == nil {
break
}

conn.SetWriteDeadline(time.Now().Add(writeDeadline))
_, err := conn.Write(b)
tcpConn.msgParser.ReleaseByteSlice(b)
@@ -7,10 +7,21 @@ import (
"time"
)

const Default_ReadDeadline = time.Second*30 //30s
const Default_WriteDeadline = time.Second*30 //30s
const Default_MaxConnNum = 3000
const Default_PendingWriteNum = 10000
const Default_LittleEndian = false
const Default_MinMsgLen = 2
const Default_MaxMsgLen = 65535

type TCPServer struct {
Addr string
MaxConnNum int
PendingWriteNum int
ReadDeadline time.Duration
WriteDeadline time.Duration
NewAgent func(*TCPConn) Agent
ln net.Listener
conns ConnSet
@@ -18,6 +29,7 @@ type TCPServer struct {
wgLn sync.WaitGroup
wgConns sync.WaitGroup

// msg parser
LenMsgLen int
MinMsgLen uint32
@@ -39,13 +51,33 @@ func (server *TCPServer) init() {
}

if server.MaxConnNum <= 0 {
server.MaxConnNum = 100
server.MaxConnNum = Default_MaxConnNum
log.SRelease("invalid MaxConnNum, reset to ", server.MaxConnNum)
}
if server.PendingWriteNum <= 0 {
server.PendingWriteNum = 100
server.PendingWriteNum = Default_PendingWriteNum
log.SRelease("invalid PendingWriteNum, reset to ", server.PendingWriteNum)
}

if server.MinMsgLen <= 0 {
server.MinMsgLen = Default_MinMsgLen
log.SRelease("invalid MinMsgLen, reset to ", server.MinMsgLen)
}

if server.MaxMsgLen <= 0 {
server.MaxMsgLen = Default_MaxMsgLen
log.SRelease("invalid MaxMsgLen, reset to ", server.MaxMsgLen)
}

if server.WriteDeadline == 0 {
server.WriteDeadline = Default_WriteDeadline
log.SRelease("invalid WriteDeadline, reset to ", server.WriteDeadline.Seconds(),"s")
}
if server.ReadDeadline == 0 {
server.ReadDeadline = Default_ReadDeadline
log.SRelease("invalid ReadDeadline, reset to ", server.ReadDeadline.Seconds(),"s")
}

if server.NewAgent == nil {
log.SFatal("NewAgent must not be nil")
}
@@ -110,7 +142,7 @@ func (server *TCPServer) run() {

server.wgConns.Add(1)

tcpConn := newTCPConn(conn, server.PendingWriteNum, server.msgParser)
tcpConn := newTCPConn(conn, server.PendingWriteNum, server.msgParser,server.WriteDeadline)
agent := server.NewAgent(tcpConn)
go func() {
agent.Run()
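With the new `Default_*` constants, a `TCPServer` can be constructed with little more than `Addr` and `NewAgent` and rely on `init()` to fill in the rest. A hedged sketch; only fields visible in the diff are used, and the `Agent` methods shown (`Run`/`OnClose`) are an assumed minimal set:

```go
package main

import (
	"github.com/duanhf2012/origin/network"
)

// echoAgent is illustrative; the diff only shows that NewAgent returns a
// network.Agent, so Run/OnClose are assumed to be the required methods.
type echoAgent struct{ conn *network.TCPConn }

func (a *echoAgent) Run()     {}
func (a *echoAgent) OnClose() {}

func main() {
	server := &network.TCPServer{
		Addr: "127.0.0.1:9001",
		NewAgent: func(conn *network.TCPConn) network.Agent {
			return &echoAgent{conn: conn}
		},
		// MaxConnNum, PendingWriteNum, Min/MaxMsgLen, ReadDeadline and
		// WriteDeadline are deliberately left at zero: per the init()
		// shown above they fall back to Default_MaxConnNum (3000),
		// Default_PendingWriteNum (10000), 2/65535 bytes and 30s deadlines.
	}
	server.Start()
	select {} // block forever; real code would hook a shutdown signal here
}
```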
@@ -14,6 +14,7 @@ type WSClient struct {
ConnectInterval time.Duration
PendingWriteNum int
MaxMsgLen uint32
MessageType int
HandshakeTimeout time.Duration
AutoReconnect bool
NewAgent func(*WSConn) Agent
@@ -21,7 +22,7 @@ type WSClient struct {
cons WebsocketConnSet
wg sync.WaitGroup
closeFlag bool
messageType int

}

func (client *WSClient) Start() {
@@ -63,7 +64,11 @@ func (client *WSClient) init() {
if client.cons != nil {
log.SFatal("client is running")
}
client.messageType = websocket.TextMessage

if client.MessageType == 0 {
client.MessageType = websocket.TextMessage
}

client.cons = make(WebsocketConnSet)
client.closeFlag = false
client.dialer = websocket.Dialer{
@@ -84,9 +89,6 @@ func (client *WSClient) dial() *websocket.Conn {
}
}

func (client *WSClient) SetMessageType(messageType int){
client.messageType = messageType
}
func (client *WSClient) connect() {
defer client.wg.Done()

@@ -106,7 +108,7 @@ reconnect:
client.cons[conn] = struct{}{}
client.Unlock()

wsConn := newWSConn(conn, client.PendingWriteNum, client.MaxMsgLen,client.messageType)
wsConn := newWSConn(conn, client.PendingWriteNum, client.MaxMsgLen,client.MessageType)
agent := client.NewAgent(wsConn)
agent.Run()
@@ -139,6 +139,7 @@ func (server *WSServer) Start() {
maxMsgLen: server.MaxMsgLen,
newAgent: server.NewAgent,
conns: make(WebsocketConnSet),
messageType:server.messageType,
upgrader: websocket.Upgrader{
HandshakeTimeout: server.HTTPTimeout,
CheckOrigin: func(_ *http.Request) bool { return true },
160 node/node.go
@@ -8,9 +8,9 @@ import (
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/profiler"
"github.com/duanhf2012/origin/service"
"github.com/duanhf2012/origin/util/timer"
"github.com/duanhf2012/origin/util/buildtime"
"io/ioutil"
"github.com/duanhf2012/origin/util/timer"
"io"
slog "log"
"net/http"
_ "net/http/pprof"
@@ -31,33 +31,40 @@ var bValid bool
var configDir = "./config/"
var logLevel string = "debug"
var logPath string
type BuildOSType = int8

const(
Windows BuildOSType = 0
Linux BuildOSType = 1
Mac BuildOSType = 2
)

func init() {

closeSig = make(chan bool,1)
closeSig = make(chan bool, 1)
sig = make(chan os.Signal, 3)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM,syscall.Signal(10))
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM, syscall.Signal(10))

console.RegisterCommandBool("help",false,"<-help> This help.",usage)
console.RegisterCommandString("name","","<-name nodeName> Node's name.",setName)
console.RegisterCommandString("start","","<-start nodeid=nodeid> Run originserver.",startNode)
console.RegisterCommandString("stop","","<-stop nodeid=nodeid> Stop originserver process.",stopNode)
console.RegisterCommandString("config","","<-config path> Configuration file path.",setConfigPath)
console.RegisterCommandBool("help", false, "<-help> This help.", usage)
console.RegisterCommandString("name", "", "<-name nodeName> Node's name.", setName)
console.RegisterCommandString("start", "", "<-start nodeid=nodeid> Run originserver.", startNode)
console.RegisterCommandString("stop", "", "<-stop nodeid=nodeid> Stop originserver process.", stopNode)
console.RegisterCommandString("config", "", "<-config path> Configuration file path.", setConfigPath)
console.RegisterCommandString("console", "", "<-console true|false> Turn on or off screen log output.", openConsole)
console.RegisterCommandString("loglevel", "debug", "<-loglevel debug|release|warning|error|fatal> Set loglevel.", setLevel)
console.RegisterCommandString("logpath", "", "<-logpath path> Set log file path.", setLogPath)
console.RegisterCommandString("pprof","","<-pprof ip:port> Open performance analysis.",setPprof)
console.RegisterCommandString("pprof", "", "<-pprof ip:port> Open performance analysis.", setPprof)
}

func usage(val interface{}) error{
func usage(val interface{}) error {
ret := val.(bool)
if ret == false {
return nil
}

if len(buildtime.GetBuildDateTime())>0 {
fmt.Fprintf(os.Stderr, "Welcome to Origin(build info: %s)\nUsage: originserver [-help] [-start node=1] [-stop] [-config path] [-pprof 0.0.0.0:6060]...\n",buildtime.GetBuildDateTime())
}else{
if len(buildtime.GetBuildDateTime()) > 0 {
fmt.Fprintf(os.Stderr, "Welcome to Origin(build info: %s)\nUsage: originserver [-help] [-start node=1] [-stop] [-config path] [-pprof 0.0.0.0:6060]...\n", buildtime.GetBuildDateTime())
} else {
fmt.Fprintf(os.Stderr, "Welcome to Origin\nUsage: originserver [-help] [-start node=1] [-stop] [-config path] [-pprof 0.0.0.0:6060]...\n")
}

@@ -71,28 +78,28 @@ func setName(val interface{}) error {

func setPprof(val interface{}) error {
listenAddr := val.(string)
if listenAddr==""{
if listenAddr == "" {
return nil
}

go func(){
go func() {
err := http.ListenAndServe(listenAddr, nil)
if err != nil {
panic(fmt.Errorf("%+v",err))
panic(fmt.Errorf("%+v", err))
}
}()

return nil
}

func setConfigPath(val interface{}) error{
func setConfigPath(val interface{}) error {
configPath := val.(string)
if configPath==""{
if configPath == "" {
return nil
}
_, err := os.Stat(configPath)
if err != nil {
return fmt.Errorf("Cannot find file path %s",configPath)
return fmt.Errorf("Cannot find file path %s", configPath)
}

cluster.SetConfigDir(configPath)
@@ -100,16 +107,16 @@ func setConfigPath(val interface{}) error{
return nil
}

func getRunProcessPid(nodeId int) (int,error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid",os.Args[0],nodeId), os.O_RDONLY, 0600)
func getRunProcessPid(nodeId int) (int, error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
defer f.Close()
if err!= nil {
return 0,err
if err != nil {
return 0, err
}

pidByte,errs := ioutil.ReadAll(f)
if errs!=nil {
return 0,errs
pidByte, errs := io.ReadAll(f)
if errs != nil {
return 0, errs
}

return strconv.Atoi(string(pidByte))
@@ -117,13 +124,13 @@ func getRunProcessPid(nodeId int) (int,error) {

func writeProcessPid(nodeId int) {
//pid
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid",os.Args[0],nodeId), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid", os.Args[0], nodeId), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
defer f.Close()
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
} else {
_,err=f.Write([]byte(fmt.Sprintf("%d",os.Getpid())))
_, err = f.Write([]byte(fmt.Sprintf("%d", os.Getpid())))
if err != nil {
fmt.Println(err.Error())
os.Exit(-1)
@@ -135,28 +142,28 @@ func GetNodeId() int {
return nodeId
}

func initNode(id int){
func initNode(id int) {
//1.初始化集群
nodeId = id
err := cluster.GetCluster().Init(GetNodeId(),Setup)
err := cluster.GetCluster().Init(GetNodeId(), Setup)
if err != nil {
log.SFatal("read system config is error ",err.Error())
log.SFatal("read system config is error ", err.Error())
}

err = initLog()
if err != nil{
if err != nil {
return
}

//2.setup service
for _,s := range preSetupService {
for _, s := range preSetupService {
//是否配置的service
if cluster.GetCluster().IsConfigService(s.GetName()) == false {
continue
}

pServiceCfg := cluster.GetCluster().GetServiceCfg(s.GetName())
s.Init(s,cluster.GetRpcClient,cluster.GetRpcServer,pServiceCfg)
s.Init(s, cluster.GetRpcClient, cluster.GetRpcServer, pServiceCfg)

service.Setup(s)
}
@@ -165,14 +172,14 @@ func initNode(id int){
service.Init(closeSig)
}

func initLog() error{
if logPath == ""{
func initLog() error {
if logPath == "" {
setLogPath("./log")
}

localnodeinfo := cluster.GetCluster().GetLocalNodeInfo()
filepre := fmt.Sprintf("%s_%d_", localnodeinfo.NodeName, localnodeinfo.NodeId)
logger,err := log.New(logLevel,logPath,filepre,slog.LstdFlags|slog.Lshortfile,10)
logger, err := log.New(logLevel, logPath, filepre, slog.LstdFlags|slog.Lshortfile, 10)
if err != nil {
fmt.Printf("cannot create log file!\n")
return err
@@ -183,8 +190,8 @@ func initLog() error{

func Start() {
err := console.Run(os.Args)
if err!=nil {
fmt.Printf("%+v\n",err)
if err != nil {
fmt.Printf("%+v\n", err)
return
}
}
@@ -196,19 +203,19 @@ func stopNode(args interface{}) error {
return nil
}

sParam := strings.Split(param,"=")
sParam := strings.Split(param, "=")
if len(sParam) != 2 {
return fmt.Errorf("invalid option %s",param)
return fmt.Errorf("invalid option %s", param)
}
if sParam[0]!="nodeid" {
return fmt.Errorf("invalid option %s",param)
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nodeId,err:= strconv.Atoi(sParam[1])
nodeId, err := strconv.Atoi(sParam[1])
if err != nil {
return fmt.Errorf("invalid option %s",param)
return fmt.Errorf("invalid option %s", param)
}

processId,err := getRunProcessPid(nodeId)
processId, err := getRunProcessPid(nodeId)
if err != nil {
return err
}
@@ -217,26 +224,26 @@ func stopNode(args interface{}) error {
return nil
}

func startNode(args interface{}) error{
func startNode(args interface{}) error {
//1.解析参数
param := args.(string)
if param == "" {
return nil
}

sParam := strings.Split(param,"=")
sParam := strings.Split(param, "=")
if len(sParam) != 2 {
return fmt.Errorf("invalid option %s",param)
return fmt.Errorf("invalid option %s", param)
}
if sParam[0]!="nodeid" {
return fmt.Errorf("invalid option %s",param)
if sParam[0] != "nodeid" {
return fmt.Errorf("invalid option %s", param)
}
nodeId,err:= strconv.Atoi(sParam[1])
nodeId, err := strconv.Atoi(sParam[1])
if err != nil {
return fmt.Errorf("invalid option %s",param)
return fmt.Errorf("invalid option %s", param)
}

timer.StartTimer(10*time.Millisecond,1000000)
timer.StartTimer(10*time.Millisecond, 1000000)
log.SRelease("Start running server.")
//2.初始化node
initNode(nodeId)
@@ -253,7 +260,7 @@ func startNode(args interface{}) error{
//6.监听程序退出信号&性能报告
bRun := true
var pProfilerTicker *time.Ticker = &time.Ticker{}
if profilerInterval>0 {
if profilerInterval > 0 {
pProfilerTicker = time.NewTicker(profilerInterval)
}
for bRun {
@@ -261,7 +268,7 @@ func startNode(args interface{}) error{
case <-sig:
log.SRelease("receipt stop signal.")
bRun = false
case <- pProfilerTicker.C:
case <-pProfilerTicker.C:
profiler.Report()
}
}
@@ -274,11 +281,10 @@ func startNode(args interface{}) error{
return nil
}

func Setup(s ...service.IService) {
for _,sv := range s {
func Setup(s ...service.IService) {
for _, sv := range s {
sv.OnSetup(sv)
preSetupService = append(preSetupService,sv)
preSetupService = append(preSetupService, sv)
}
}

@@ -286,7 +292,7 @@ func GetService(serviceName string) service.IService {
return service.GetService(serviceName)
}

func SetConfigDir(configDir string){
func SetConfigDir(configDir string) {
configDir = configDir
cluster.SetConfigDir(configDir)
}
@@ -295,58 +301,58 @@ func GetConfigDir() string {
return configDir
}

func SetSysLog(strLevel string, pathname string, flag int){
logs,_:= log.New(strLevel,pathname, "", flag,10)
func SetSysLog(strLevel string, pathname string, flag int) {
logs, _ := log.New(strLevel, pathname, "", flag, 10)
log.Export(logs)
}

func OpenProfilerReport(interval time.Duration){
func OpenProfilerReport(interval time.Duration) {
profilerInterval = interval
}

func openConsole(args interface{}) error{
func openConsole(args interface{}) error {
if args == "" {
return nil
}
strOpen := strings.ToLower(strings.TrimSpace(args.(string)))
if strOpen == "false" {
log.OpenConsole = false
}else if strOpen == "true" {
} else if strOpen == "true" {
log.OpenConsole = true
}else{
} else {
return errors.New("Parameter console error!")
}
return nil
}

func setLevel(args interface{}) error{
if args==""{
func setLevel(args interface{}) error {
if args == "" {
return nil
}

logLevel = strings.TrimSpace(args.(string))
if logLevel!= "debug" && logLevel!="release"&& logLevel!="warning"&&logLevel!="error"&&logLevel!="fatal" {
if logLevel != "debug" && logLevel != "release" && logLevel != "warning" && logLevel != "error" && logLevel != "fatal" {
return errors.New("unknown level: " + logLevel)
}
return nil
}

func setLogPath(args interface{}) error{
if args == ""{
func setLogPath(args interface{}) error {
if args == "" {
return nil
}
logPath = strings.TrimSpace(args.(string))
dir, err := os.Stat(logPath) //这个文件夹不存在
if err == nil && dir.IsDir()==false {
return errors.New("Not found dir "+logPath)
if err == nil && dir.IsDir() == false {
return errors.New("Not found dir " + logPath)
}

if err != nil {
err = os.Mkdir(logPath, os.ModePerm)
if err != nil {
return errors.New("Cannot create dir "+logPath)
return errors.New("Cannot create dir " + logPath)
}
}

return nil
}
}
@@ -15,3 +15,7 @@ func KillProcess(processId int){
fmt.Printf("kill processid %d is successful.\n",processId)
}
}

func GetBuildOSType() BuildOSType{
return Linux
}

@@ -15,3 +15,7 @@ func KillProcess(processId int){
fmt.Printf("kill processid %d is successful.\n",processId)
}
}

func GetBuildOSType() BuildOSType{
return Mac
}

@@ -4,4 +4,8 @@ package node

func KillProcess(processId int){

}
}

func GetBuildOSType() BuildOSType{
return Windows
}
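These hunks give each per-OS `KillProcess` file a `GetBuildOSType()` helper returning the `Windows`/`Linux`/`Mac` constants declared in `node/node.go` above. A short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/duanhf2012/origin/node"
)

func main() {
	// GetBuildOSType reports which build-tagged file was compiled in;
	// the constants come from the node package shown in the diff above.
	switch node.GetBuildOSType() {
	case node.Windows:
		fmt.Println("built for Windows")
	case node.Linux:
		fmt.Println("built for Linux")
	case node.Mac:
		fmt.Println("built for macOS")
	}
}
```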
@@ -6,7 +6,6 @@ import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/duanhf2012/origin/network"
"github.com/duanhf2012/origin/util/timer"
"math"
"reflect"
"runtime"
@@ -32,6 +31,9 @@ type Client struct {
TriggerRpcEvent
}

const MaxCheckCallRpcCount = 1000
const MaxPendingWriteNum = 200000
const ConnectInterval = 2*time.Second
var clientSeq uint32

func (client *Client) NewClientAgent(conn *network.TCPConn) network.Agent {
@@ -41,18 +43,23 @@ func (client *Client) NewClientAgent(conn *network.TCPConn) network.Agent {
return client
}

func (client *Client) Connect(id int, addr string, maxRpcParamLen uint32) error {
client.clientSeq = atomic.AddUint32(&clientSeq, 1)
client.id = id
client.Addr = addr
client.maxCheckCallRpcCount = 1000
client.maxCheckCallRpcCount = MaxCheckCallRpcCount
client.callRpcTimeout = 15 * time.Second
client.ConnNum = 1
client.ConnectInterval = time.Second * 2
client.PendingWriteNum = 200000
client.ConnectInterval = ConnectInterval
client.PendingWriteNum = MaxPendingWriteNum
client.AutoReconnect = true

client.ConnNum = 1
client.LenMsgLen = 4
client.MinMsgLen = 2
client.ReadDeadline = Default_ReadWriteDeadline
client.WriteDeadline = Default_ReadWriteDeadline

if maxRpcParamLen > 0 {
client.MaxMsgLen = maxRpcParamLen
} else {
@@ -73,17 +80,10 @@ func (client *Client) Connect(id int, addr string, maxRpcParamLen uint32) error
}

func (client *Client) startCheckRpcCallTimer() {
t := timer.NewTimer(5 * time.Second)
for {
select {
case cTimer := <-t.C:
cTimer.SetupTimer(time.Now())
client.checkRpcCallTimeout()
}
time.Sleep(5 * time.Second)
client.checkRpcCallTimeout()
}

t.Cancel()
timer.ReleaseTimer(t)
}

func (client *Client) makeCallFail(call *Call) {
@@ -161,6 +161,10 @@ func (client *Client) removePending(seq uint64) *Call {
}

func (client *Client) FindPending(seq uint64) *Call {
if seq == 0 {
return nil
}

client.pendingLock.Lock()
v, ok := client.pending[seq]
if ok == false {
@@ -341,6 +345,19 @@ func (client *Client) GetId() int {

func (client *Client) Close(waitDone bool) {
client.TCPClient.Close(waitDone)

client.pendingLock.Lock()
for {
pElem := client.pendingTimer.Front()
if pElem == nil {
break
}

pCall := pElem.Value.(*Call)
pCall.Err = errors.New("nodeid is disconnect ")
client.makeCallFail(pCall)
}
client.pendingLock.Unlock()
}

func (client *Client) GetClientSeq() uint32 {
1777 rpc/messagequeue.pb.go (new file; diff suppressed because it is too large)
51 rpc/messagequeue.proto (new file)
@@ -0,0 +1,51 @@
syntax = "proto3";

option go_package = ".;rpc";

message DBQueuePopReq {
string CustomerId = 1;
string QueueName = 2;
int32 PopStartPos = 3;
int32 PopNum = 4;
bytes pushData = 5;
}

message DBQueuePopRes {
string QueueName = 1;
repeated bytes pushData = 2;
}

enum SubscribeType {
Subscribe = 0;
Unsubscribe = 1;
}

enum SubscribeMethod {
Method_Custom = 0;//自定义模式,以消费者设置的StartIndex开始获取或订阅
Method_Last = 1;//Last模式,以该消费者上次记录的位置开始订阅
}

//订阅
message DBQueueSubscribeReq {
SubscribeType SubType = 1; //订阅类型
SubscribeMethod Method = 2; //订阅方法
string CustomerId = 3; //消费者Id
int32 FromNodeId = 4;
string RpcMethod = 5;
string TopicName = 6; //主题名称
uint64 StartIndex = 7; //开始位置 ,格式前4位是时间戳秒,后面是序号。如果填0时,服务自动修改成:(4bit 当前时间秒)| (0000 4bit)
int32 OneBatchQuantity = 8;//订阅一次发送的数量,不设置有默认值1000条
}

message DBQueueSubscribeRes {
}

message DBQueuePublishReq {
string TopicName = 1; //主是,名称,数据
repeated bytes pushData = 2;
}

message DBQueuePublishRes {
}
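rpc/messagequeue.pb.go is the generated counterpart of this proto, so the messages above are available as Go types in the `rpc` package. A hedged sketch of building a subscribe request; the enum constant names assume the standard protoc-gen-go naming and all field values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/duanhf2012/origin/rpc"
)

func main() {
	// Subscribe from the position this consumer last reached (Method_Last);
	// StartIndex is therefore left at 0 and filled in by the service.
	req := &rpc.DBQueueSubscribeReq{
		SubType:          rpc.SubscribeType_Subscribe,
		Method:           rpc.SubscribeMethod_Method_Last,
		CustomerId:       "customer_1",      // illustrative consumer id
		FromNodeId:       1,                 // illustrative node id
		RpcMethod:        "RPC_OnQueueData", // illustrative callback method name
		TopicName:        "player_event",    // illustrative topic
		OneBatchQuantity: 0,                 // 0 => service default of 1000 per batch
	}
	fmt.Println(req.String())
}
```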
3148 rpc/rank.pb.go (new file; diff suppressed because it is too large)
79 rpc/rank.proto (new file)
@@ -0,0 +1,79 @@
syntax = "proto3";
package rpc;
option go_package = ".;rpc";

// RankData 排行数据
message RankData {
uint64 Key = 1; //数据主建
repeated int64 SortData = 2; //参与排行的数据
bytes Data = 3; //不参与排行的数据
}

// RankPosData 排行数据——查询返回
message RankPosData {
uint64 Key = 1; //数据主建
uint64 Rank = 2; //名次
repeated int64 SortData = 3; //参与排行的数据
bytes Data = 4; //不参与排行的数据
}

// RankList 排行榜数据
message RankList {
uint64 RankId = 1; //排行榜类型
string RankName = 2; //排行榜名称
int32 SkipListLevel = 3; //排行榜level-生成的跳表的level, 8/16/32/64等
bool IsDec = 4; //不参与排行的数据
uint64 MaxRank = 5; //最大排名
int64 ExpireMs = 6; //有效时间,0永不过期
}

// UpsetRankData 更新排行榜数据
message UpsetRankData {
uint64 RankId = 1; //排行榜的ID
repeated RankData RankDataList = 2; //排行数据
}

// DeleteByKey 删除排行榜数据
message DeleteByKey {
uint64 RankId = 1; //排行榜的分类ID
repeated uint64 KeyList = 2; //排行数据
}

// AddRankList 新增排行榜
message AddRankList {
repeated RankList AddList = 1; //添加的排行榜列表
}

// FindRankDataByKey 查找排行信息
message FindRankDataByKey {
uint64 RankId = 1; //排行榜的ID
uint64 Key = 2; //排行的key
}

// FindRankDataByRank 查找排行信息
message FindRankDataByRank {
uint64 RankId = 1; //排行榜的ID
uint64 Rank = 2; //排行名次
}

// FindRankDataList 查找排行信息
message FindRankDataList {
uint64 RankId = 1; //排行榜的ID
uint64 StartRank = 2; //排行的位置 0开始
uint64 Count = 3; //查询格式
uint64 Key = 4; //附带一个Key查询排行信息
}

// RankDataList
message RankDataList {
uint64 RankDataCount = 1; //排行长度
repeated RankPosData RankPosDataList = 2; //排行数据
RankPosData KeyRank = 3; //附带的Key查询排行结果信息
}

// RankResult
message RankResult {
int32 AddCount = 1;//新增数量
int32 ModifyCount = 2; //修改数量
int32 RemoveCount = 3;//删除数量
}
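Likewise, rpc/rank.pb.go generates Go types for the ranking messages. A hedged sketch of preparing an upsert; the IDs, scores, and payload are illustrative:

```go
package main

import (
	"fmt"

	"github.com/duanhf2012/origin/rpc"
)

func main() {
	// One ranking entry: Key identifies the owner, SortData holds the
	// values ranked on, Data carries opaque payload that is not ranked.
	entry := &rpc.RankData{
		Key:      10001,                     // illustrative player id
		SortData: []int64{2500, 1660000000}, // e.g. score, then a tie-break timestamp
		Data:     []byte(`{"name":"boyce"}`),
	}

	upsert := &rpc.UpsetRankData{
		RankId:       1, // illustrative ranking id, should match an AddRankList entry
		RankDataList: []*rpc.RankData{entry},
	}
	fmt.Println(upsert.String())
}
```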
@@ -85,7 +85,7 @@ type IRpcHandler interface {
GetRpcHandler() IRpcHandler
HandlerRpcRequest(request *RpcRequest)
HandlerRpcResponseCB(call *Call)
CallMethod(ServiceMethod string, param interface{}, reply interface{}) error
CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error
AsyncCall(serviceMethod string, args interface{}, callback interface{}) error
Call(serviceMethod string, args interface{}, reply interface{}) error
Go(serviceMethod string, args interface{}) error
@@ -299,7 +299,7 @@ func (handler *RpcHandler) HandlerRpcRequest(request *RpcRequest) {
}
}

func (handler *RpcHandler) CallMethod(ServiceMethod string, param interface{}, reply interface{}) error {
func (handler *RpcHandler) CallMethod(client *Client,ServiceMethod string, param interface{},callBack reflect.Value, reply interface{}) error {
var err error
v, ok := handler.mapFunctions[ServiceMethod]
if ok == false {
@@ -309,14 +309,101 @@ func (handler *RpcHandler) CallMethod(ServiceMethod string, param interface{}, r
}

var paramList []reflect.Value
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //接受者
paramList = append(paramList, reflect.ValueOf(param))
paramList = append(paramList, reflect.ValueOf(reply)) //输出参数
var returnValues []reflect.Value
var pCall *Call
var callSeq uint64
if v.hasResponder == true {
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //接受者
pCall = MakeCall()
pCall.callback = &callBack
pCall.Seq = client.generateSeq()
callSeq = pCall.Seq

returnValues := v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()
if errInter != nil {
err = errInter.(error)
client.AddPending(pCall)

//有返回值时
if reply != nil {
//如果是Call同步调用
hander :=func(Returns interface{}, Err RpcError) {
rpcCall := client.RemovePending(callSeq)
if rpcCall == nil {
log.SError("cannot find call seq ",callSeq)
return
}

//解析数据
if len(Err)!=0 {
rpcCall.Err = Err
}else if Returns != nil {
_, processor := GetProcessorType(Returns)
var bytes []byte
bytes,rpcCall.Err = processor.Marshal(Returns)
if rpcCall.Err == nil {
rpcCall.Err = processor.Unmarshal(bytes,reply)
}
}

//如果找不到,说明已经超时
rpcCall.Reply = reply
rpcCall.done<-rpcCall
}
paramList = append(paramList, reflect.ValueOf(hander))
}else{//无返回值时,是一个requestHandlerNull空回调
paramList = append(paramList, callBack)
}
paramList = append(paramList, reflect.ValueOf(param))

//rpc函数被调用
returnValues = v.method.Func.Call(paramList)

//判断返回值是否错误,有错误时则回调
errInter := returnValues[0].Interface()
if errInter != nil && callBack!=requestHandlerNull{
err = errInter.(error)
callBack.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
}else{
paramList = append(paramList, reflect.ValueOf(handler.GetRpcHandler())) //接受者
paramList = append(paramList, reflect.ValueOf(param))

//被调用RPC函数有返回值时
if v.outParamValue.IsValid() {
//不带返回值参数的RPC函数
if reply == nil {
paramList = append(paramList, reflect.New(v.outParamValue.Type().Elem()))
}else{
//带返回值参数的RPC函数
paramList = append(paramList, reflect.ValueOf(reply)) //输出参数
}
}

returnValues = v.method.Func.Call(paramList)
errInter := returnValues[0].Interface()

//如果无回调
if callBack != requestHandlerNull {
valErr := nilError
if errInter != nil {
err = errInter.(error)
valErr = reflect.ValueOf(err)
}

callBack.Call([]reflect.Value{reflect.ValueOf(reply),valErr })
}
}

rpcCall := client.FindPending(callSeq)
if rpcCall!=nil {
err = rpcCall.Done().Err
if rpcCall.callback!= nil {
valErr := nilError
if rpcCall.Err != nil {
valErr = reflect.ValueOf(rpcCall.Err)
}
rpcCall.callback.Call([]reflect.Value{reflect.ValueOf(rpcCall.Reply), valErr})
}
client.RemovePending(rpcCall.Seq)
ReleaseCall(rpcCall)
}

return err
@@ -356,7 +443,7 @@ func (handler *RpcHandler) goRpc(processor IRpcProcessor, bCast bool, nodeId int
serviceName := serviceMethod[:findIndex]
if serviceName == handler.rpcHandler.GetName() { //自己服务调用
//调用自己rpcHandler处理器
return pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceMethod, args, nil)
return pLocalRpcServer.myselfRpcHandlerGo(pClientList[i],serviceName, serviceMethod, args, requestHandlerNull,nil)
}
//其他的rpcHandler的处理器
pCall := pLocalRpcServer.selfNodeRpcHandlerGo(processor, pClientList[i], true, serviceName, 0, serviceMethod, args, nil, nil)
@@ -410,7 +497,7 @@ func (handler *RpcHandler) callRpc(nodeId int, serviceMethod string, args interf
serviceName := serviceMethod[:findIndex]
if serviceName == handler.rpcHandler.GetName() { //自己服务调用
//调用自己rpcHandler处理器
return pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceMethod, args, reply)
return pLocalRpcServer.myselfRpcHandlerGo(pClient,serviceName, serviceMethod, args,requestHandlerNull, reply)
}
//其他的rpcHandler的处理器
pCall := pLocalRpcServer.selfNodeRpcHandlerGo(nil, pClient, false, serviceName, 0, serviceMethod, args, reply, nil)
@@ -489,12 +576,7 @@ func (handler *RpcHandler) asyncCallRpc(nodeId int, serviceMethod string, args i
serviceName := serviceMethod[:findIndex]
//调用自己rpcHandler处理器
if serviceName == handler.rpcHandler.GetName() { //自己服务调用
err := pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceMethod, args, reply)
if err == nil {
fVal.Call([]reflect.Value{reflect.ValueOf(reply), nilError})
} else {
fVal.Call([]reflect.Value{reflect.ValueOf(reply), reflect.ValueOf(err)})
}
return pLocalRpcServer.myselfRpcHandlerGo(pClient,serviceName, serviceMethod, args,fVal ,reply)
}

//其他的rpcHandler的处理器
@@ -571,7 +653,7 @@ func (handler *RpcHandler) RawGoNode(rpcProcessorType RpcProcessorType, nodeId i
pLocalRpcServer := handler.funcRpcServer()
//调用自己rpcHandler处理器
if serviceName == handler.rpcHandler.GetName() { //自己服务调用
err := pLocalRpcServer.myselfRpcHandlerGo(serviceName, serviceName, rawArgs.GetRawData(), nil)
err := pLocalRpcServer.myselfRpcHandlerGo(handler.pClientList[i],serviceName, serviceName, rawArgs.GetRawData(), requestHandlerNull,nil)
//args.DoGc()
return err
}
@@ -9,6 +9,7 @@ import (
"net"
"reflect"
"strings"
"time"
)

type RpcProcessorType uint8
@@ -62,6 +63,8 @@ func (server *Server) Init(rpcHandleFinder RpcHandleFinder) {
server.rpcServer = &network.TCPServer{}
}

const Default_ReadWriteDeadline = 15*time.Second

func (server *Server) Start(listenAddr string, maxRpcParamLen uint32) {
splitAddr := strings.Split(listenAddr, ":")
if len(splitAddr) != 2 {
@@ -77,10 +80,12 @@ func (server *Server) Start(listenAddr string, maxRpcParamLen uint32) {
server.rpcServer.MaxMsgLen = math.MaxUint32
}

server.rpcServer.MaxConnNum = 10000
server.rpcServer.MaxConnNum = 100000
server.rpcServer.PendingWriteNum = 2000000
server.rpcServer.NewAgent = server.NewAgent
server.rpcServer.LittleEndian = LittleEndian
server.rpcServer.WriteDeadline = Default_ReadWriteDeadline
server.rpcServer.ReadDeadline = Default_ReadWriteDeadline
server.rpcServer.Start()
}

@@ -233,7 +238,7 @@ func (server *Server) NewAgent(c *network.TCPConn) network.Agent {
return agent
}

func (server *Server) myselfRpcHandlerGo(handlerName string, serviceMethod string, args interface{}, reply interface{}) error {
func (server *Server) myselfRpcHandlerGo(client *Client,handlerName string, serviceMethod string, args interface{},callBack reflect.Value, reply interface{}) error {
rpcHandler := server.rpcHandleFinder.FindRpcHandler(handlerName)
if rpcHandler == nil {
err := errors.New("service method " + serviceMethod + " not config!")
@@ -241,7 +246,9 @@ func (server *Server) myselfRpcHandlerGo(handlerName string, serviceMethod strin
return err
}

return rpcHandler.CallMethod(serviceMethod, args, reply)

return rpcHandler.CallMethod(client,serviceMethod, args,callBack, reply)
}

func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Client, noReply bool, handlerName string, rpcMethodId uint32, serviceMethod string, args interface{}, reply interface{}, rawArgs []byte) *Call {
@@ -252,8 +259,8 @@ func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Clie
if rpcHandler == nil {
pCall.Seq = 0
pCall.Err = errors.New("service method " + serviceMethod + " not config!")
log.SError(pCall.Err.Error())
pCall.done <- pCall
log.SError(pCall.Err.Error())

return pCall
}
@@ -277,33 +284,34 @@ func (server *Server) selfNodeRpcHandlerGo(processor IRpcProcessor, client *Clie

if noReply == false {
client.AddPending(pCall)
callSeq := pCall.Seq
req.requestHandle = func(Returns interface{}, Err RpcError) {
if reply != nil && Returns != reply && Returns != nil {
byteReturns, err := req.rpcProcessor.Marshal(Returns)
if err != nil {
log.SError("returns data cannot be marshal ", pCall.Seq)
log.SError("returns data cannot be marshal ", callSeq)
ReleaseRpcRequest(req)
}

err = req.rpcProcessor.Unmarshal(byteReturns, reply)
if err != nil {
log.SError("returns data cannot be Unmarshal ", pCall.Seq)
log.SError("returns data cannot be Unmarshal ", callSeq)
ReleaseRpcRequest(req)
}
}

v := client.RemovePending(pCall.Seq)
v := client.RemovePending(callSeq)
if v == nil {
log.SError("rpcClient cannot find seq ", pCall.Seq, " in pending")
log.SError("rpcClient cannot find seq ",callSeq, " in pending")
ReleaseRpcRequest(req)
return
}
if len(Err) == 0 {
pCall.Err = nil
v.Err = nil
} else {
pCall.Err = Err
v.Err = Err
}
pCall.done <- pCall
v.done <- v
ReleaseRpcRequest(req)
}
}
@@ -1,5 +1,7 @@
package service

import "errors"

//本地所有的service
var mapServiceName map[string]IService
var setupServiceList []IService
@@ -23,7 +25,8 @@ func Init(chanCloseSig chan bool) {
for _,s := range setupServiceList {
err := s.OnInit()
if err != nil {
panic(err)
errs := errors.New("Failed to initialize "+s.GetName()+" service:"+err.Error())
panic(errs)
}
}
}
@@ -4,7 +4,7 @@ import (
"bytes"
"crypto/tls"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"net/url"
@@ -64,7 +64,7 @@ func (m *HttpClientModule) Init(maxpool int, proxyUrl string) {
Proxy: proxyFun,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
Timeout: 5 * time.Second,
Timeout: 5 * time.Second,
}
}

@@ -103,7 +103,7 @@ func (m *HttpClientModule) Request(method string, url string, body []byte, heade
}
defer rsp.Body.Close()

ret.Body, err = ioutil.ReadAll(rsp.Body)
ret.Body, err = io.ReadAll(rsp.Body)
if err != nil {
ret.Err = err
return ret
@@ -52,10 +52,10 @@ func (mm *MongoModule) TakeSession() Session {
return Session{Client: mm.client, maxOperatorTimeOut: mm.maxOperatorTimeOut}
}

func (s *Session) CountDocument(db string, collection string) (int64, error) {
func (s *Session) CountDocument(db string, collection string, filter interface{}) (int64, error) {
ctxTimeout, cancel := s.GetDefaultContext()
defer cancel()
return s.Database(db).Collection(collection).CountDocuments(ctxTimeout, bson.D{})
return s.Database(db).Collection(collection).CountDocuments(ctxTimeout, filter)
}

func (s *Session) NextSeq(db string, collection string, id interface{}) (int, error) {
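CountDocument now takes a caller-supplied filter instead of always counting with an empty bson.D. A hedged usage sketch; the mongodbmodule import path and the database/collection names are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
	"go.mongodb.org/mongo-driver/bson"
)

// countHighLevelPlayers counts documents matching a filter; passing
// bson.D{} (or bson.M{}) keeps the old count-everything behaviour.
func countHighLevelPlayers(s mongodbmodule.Session) (int64, error) {
	return s.CountDocument("game", "player", bson.M{"level": bson.M{"$gte": 10}})
}

func main() {
	// Obtaining a Session normally goes through MongoModule.Init and
	// TakeSession; that wiring is omitted here.
	fmt.Println("see countHighLevelPlayers")
}
```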
@@ -42,17 +42,17 @@ type RedisModule struct {
|
||||
|
||||
// ConfigRedis 服务器配置
|
||||
type ConfigRedis struct {
|
||||
IP string
|
||||
Port int
|
||||
Password string
|
||||
DbIndex int
|
||||
MaxIdle int //最大的空闲连接数,表示即使没有redis连接时依然可以保持N个空闲的连接,而不被清除,随时处于待命状态。
|
||||
MaxActive int //最大的激活连接数,表示同时最多有N个连接
|
||||
IdleTimeout int //最大的空闲连接等待时间,超过此时间后,空闲连接将被关闭
|
||||
IP string
|
||||
Port int
|
||||
Password string
|
||||
DbIndex int
|
||||
MaxIdle int //最大的空闲连接数,表示即使没有redis连接时依然可以保持N个空闲的连接,而不被清除,随时处于待命状态。
|
||||
MaxActive int //最大的激活连接数,表示同时最多有N个连接
|
||||
IdleTimeout int //最大的空闲连接等待时间,超过此时间后,空闲连接将被关闭
|
||||
}
|
||||
|
||||
func (m *RedisModule) Init(redisCfg *ConfigRedis) {
|
||||
redisServer := fmt.Sprintf("%s:%d",redisCfg.IP, redisCfg.Port)
|
||||
redisServer := fmt.Sprintf("%s:%d", redisCfg.IP, redisCfg.Port)
|
||||
m.redisPool = &redis.Pool{
|
||||
Wait: true,
|
||||
MaxIdle: redisCfg.MaxIdle,
|
||||
@@ -192,7 +192,6 @@ func (m *RedisModule) HSetStruct(key string, val interface{}) error {
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
|
||||
_, err = conn.Do("HSET", redis.Args{}.Add(key).AddFlat(val)...)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -254,11 +253,11 @@ func (m *RedisModule) setMuchStringByExpire(mapInfo map[interface{}]interface{},
|
||||
}
|
||||
}
|
||||
|
||||
if serr!=nil {
|
||||
if serr != nil {
|
||||
log.Error("setMuchStringByExpire fail,reason:%v", serr)
|
||||
conn.Do("DISCARD")
|
||||
return serr
|
||||
}else{
|
||||
} else {
|
||||
_, err = conn.Do("EXEC")
|
||||
}
|
||||
|
||||
@@ -287,7 +286,7 @@ func (m *RedisModule) GetString(key interface{}) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return redis.String(ret,nil)
|
||||
return redis.String(ret, nil)
|
||||
}
|
||||
|
||||
func (m *RedisModule) GetStringJSON(key string, st interface{}) error {
|
||||
@@ -345,7 +344,7 @@ func (m *RedisModule) GetStringMap(keys []string) (retMap map[string]string, err
|
||||
if err != nil {
|
||||
log.Error("GetMuchString fail,reason:%v", err)
|
||||
conn.Do("DISCARD")
|
||||
return nil,err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -442,7 +441,7 @@ func (m *RedisModule) DelStringKeyList(keys []interface{}) (map[interface{}]bool
|
||||
if err != nil {
|
||||
log.Error("DelMuchString fail,reason:%v", err)
|
||||
conn.Do("DISCARD")
|
||||
return nil,err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// 执行命令
|
||||
@@ -491,7 +490,7 @@ func (m *RedisModule) SetHash(redisKey, hashKey, value interface{}) error {
|
||||
return retErr
|
||||
}
|
||||
|
||||
//GetRedisAllHashJSON ...
|
||||
// GetRedisAllHashJSON ...
|
||||
func (m *RedisModule) GetAllHashJSON(redisKey string) (map[string]string, error) {
|
||||
if redisKey == "" {
|
||||
return nil, errors.New("Key Is Empty")
|
||||
@@ -531,7 +530,7 @@ func (m *RedisModule) GetHash(redisKey interface{}, fieldKey interface{}) (strin
|
||||
return "", errors.New("Reids Get Hash nil")
|
||||
}
|
||||
|
||||
return redis.String(value,nil)
|
||||
return redis.String(value, nil)
|
||||
}
|
||||
|
||||
func (m *RedisModule) GetMuchHash(args ...interface{}) ([]string, error) {
|
||||
@@ -556,7 +555,7 @@ func (m *RedisModule) GetMuchHash(args ...interface{}) ([]string, error) {
|
||||
|
||||
valueList := value.([]interface{})
|
||||
retList := []string{}
|
||||
for _, valueItem := range valueList{
|
||||
for _, valueItem := range valueList {
|
||||
valueByte, ok := valueItem.([]byte)
|
||||
if !ok {
|
||||
retList = append(retList, "")
|
||||
@@ -618,8 +617,8 @@ func (m *RedisModule) SetHashMapJSON(redisKey string, mapFieldValue map[interfac
|
||||
for symbol, val := range mapFieldValue {
|
||||
temp, err := json.Marshal(val)
|
||||
if err == nil {
|
||||
_,err = conn.Do("HSET", redisKey, symbol, temp)
|
||||
if err!=nil {
|
||||
_, err = conn.Do("HSET", redisKey, symbol, temp)
|
||||
if err != nil {
|
||||
log.Error("SetMuchHashJSON fail,reason:%v", err)
|
||||
conn.Send("DISCARD")
|
||||
return err
|
||||
@@ -650,25 +649,25 @@ func (m *RedisModule) DelHash(args ...interface{}) error {
|
||||
}
|
||||
|
||||
func (m *RedisModule) LPushList(args ...interface{}) error {
|
||||
err := m.setListPush("LPUSH",args...)
|
||||
err := m.setListPush("LPUSH", args...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *RedisModule) LPushListJSON(key interface{}, value ...interface{}) error {
|
||||
return m.setListJSONPush("LPUSH",key,value...)
|
||||
return m.setListJSONPush("LPUSH", key, value...)
|
||||
}
|
||||
|
||||
func (m *RedisModule) RPushList(args ...interface{}) error {
|
||||
err := m.setListPush("RPUSH",args...)
|
||||
err := m.setListPush("RPUSH", args...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *RedisModule) RPushListJSON(key interface{}, value ...interface{}) error {
|
||||
return m.setListJSONPush("RPUSH",key,value...)
|
||||
return m.setListJSONPush("RPUSH", key, value...)
|
||||
}
|
||||
|
||||
//LPUSH和RPUSH
|
||||
func (m *RedisModule) setListPush(setType string,args...interface{}) error {
|
||||
// LPUSH和RPUSH
|
||||
func (m *RedisModule) setListPush(setType string, args ...interface{}) error {
|
||||
if setType != "LPUSH" && setType != "RPUSH" {
|
||||
return errors.New("Redis List Push Type Error,Must Be LPUSH or RPUSH")
|
||||
}
|
||||
@@ -685,17 +684,17 @@ func (m *RedisModule) setListPush(setType string,args...interface{}) error {
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (m *RedisModule) setListJSONPush(setType string,key interface{}, value ...interface{}) error {
|
||||
func (m *RedisModule) setListJSONPush(setType string, key interface{}, value ...interface{}) error {
|
||||
args := []interface{}{key}
|
||||
for _,v := range value{
|
||||
for _, v := range value {
|
||||
jData, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args = append(args,string(jData))
|
||||
args = append(args, string(jData))
|
||||
}
|
||||
|
||||
return m.setListPush(setType,args...)
|
||||
return m.setListPush(setType, args...)
|
||||
}
|
||||
|
||||
// Lrange ...
|
||||
@@ -715,7 +714,7 @@ func (m *RedisModule) LRangeList(key string, start, end int) ([]string, error) {
|
||||
return redis.Strings(reply, err)
|
||||
}
|
||||
|
||||
//获取List的长度
|
||||
// 获取List的长度
|
||||
func (m *RedisModule) GetListLen(key string) (int, error) {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
@@ -731,11 +730,11 @@ func (m *RedisModule) GetListLen(key string) (int, error) {
|
||||
return redis.Int(reply, err)
|
||||
}
|
||||
|
||||
//弹出List最后条记录
|
||||
func (m *RedisModule) RPOPListValue(key string) (string,error) {
|
||||
// 弹出List最后条记录
|
||||
func (m *RedisModule) RPOPListValue(key string) (string, error) {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
return "",err
|
||||
return "", err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
@@ -783,7 +782,7 @@ func (m *RedisModule) LRange(key string, start, stop int) ([]byte, error) {
|
||||
return makeListJson(reply.([]interface{}), false), nil
|
||||
}
|
||||
|
||||
//弹出list(消息队列)数据,数据放入out fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
|
||||
// 弹出list(消息队列)数据,数据放入out fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
|
||||
func (m *RedisModule) ListPopJson(key string, fromLeft, block bool, timeout int, out interface{}) error {
|
||||
b, err := m.ListPop(key, fromLeft, block, timeout)
|
||||
if err != nil {
|
||||
@@ -796,7 +795,7 @@ func (m *RedisModule) ListPopJson(key string, fromLeft, block bool, timeout int,
|
||||
return nil
|
||||
}
|
||||
|
||||
//弹出list(消息队列)数据 fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
|
||||
// 弹出list(消息队列)数据 fromLeft表示是否从左侧弹出 block表示是否阻塞 timeout表示阻塞超时
|
||||
func (m *RedisModule) ListPop(key string, fromLeft, block bool, timeout int) ([]byte, error) {
|
||||
cmd := ""
|
||||
if fromLeft {
|
||||
@@ -838,7 +837,7 @@ func (m *RedisModule) ListPop(key string, fromLeft, block bool, timeout int) ([]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
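ListPop/ListPopJson wrap the (optionally blocking) pop commands, so they can serve as a very small message-queue consumer. A sketch under assumptions: the timeout unit follows whatever the underlying blocking pop expects, and the Job type and "job_queue" key are invented; it is assumed to live in the same package as RedisModule:

type Job struct {
    Id int `json:"id"`
}

// consumeJobs blocks on the left end of "job_queue" and decodes each popped
// element into a Job until the stop channel is closed.
func consumeJobs(m *RedisModule, stop <-chan struct{}) {
    for {
        select {
        case <-stop:
            return
        default:
        }

        var job Job
        // fromLeft=true, block=true; on timeout or decode failure just retry.
        if err := m.ListPopJson("job_queue", true, true, 5, &job); err != nil {
            continue
        }
        // handle the job here
        _ = job
    }
}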
//有序集合插入Json
|
||||
// 有序集合插入Json
|
||||
func (m *RedisModule) ZADDInsertJson(key string, score float64, value interface{}) error {
|
||||
|
||||
conn, err := m.getConn()
|
||||
@@ -858,7 +857,7 @@ func (m *RedisModule) ZADDInsertJson(key string, score float64, value interface{
|
||||
return nil
|
||||
}
|
||||
|
||||
//有序集合插入
|
||||
// 有序集合插入
|
||||
func (m *RedisModule) ZADDInsert(key string, score float64, Data interface{}) error {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
@@ -898,7 +897,7 @@ func (m *RedisModule) ZRangeJSON(key string, start, stop int, ascend bool, withS
|
||||
return nil
|
||||
}
|
||||
|
||||
//取有序set指定排名 ascend=true表示按升序遍历 否则按降序遍历
|
||||
// 取有序set指定排名 ascend=true表示按升序遍历 否则按降序遍历
|
||||
func (m *RedisModule) ZRange(key string, start, stop int, ascend bool, withScores bool) ([]byte, error) {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
@@ -922,7 +921,7 @@ func (m *RedisModule) ZRange(key string, start, stop int, ascend bool, withScore
|
||||
return makeListJson(reply.([]interface{}), withScores), nil
|
||||
}
|
||||
|
||||
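ZADDInsert/ZADDInsertJson plus ZRange give a simple ranking primitive; makeListJson (below) packs the reply into a JSON array. A sketch, with the "rank" key and player names invented for illustration:

// topRanks writes two scores into the sorted set "rank" and reads the top n
// members back. ascend=false walks from the highest score; withScores=true
// interleaves member and score in the returned JSON array.
func topRanks(m *RedisModule, n int) ([]byte, error) {
    if err := m.ZADDInsert("rank", 1500, "player_1"); err != nil {
        return nil, err
    }
    if err := m.ZADDInsert("rank", 1720, "player_2"); err != nil {
        return nil, err
    }
    return m.ZRange("rank", 0, n-1, false, true)
}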
//获取有序集合长度
|
||||
// 获取有序集合长度
|
||||
func (m *RedisModule) Zcard(key string) (int, error) {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
@@ -937,7 +936,7 @@ func (m *RedisModule) Zcard(key string) (int, error) {
|
||||
return int(reply.(int64)), nil
|
||||
}
|
||||
|
||||
//["123","234"]
|
||||
// ["123","234"]
|
||||
func makeListJson(redisReply []interface{}, withScores bool) []byte {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("[")
|
||||
@@ -1006,7 +1005,7 @@ func (m *RedisModule) ZRangeByScore(key string, start, stop float64, ascend bool
|
||||
return makeListJson(reply.([]interface{}), withScores), nil
|
||||
}
|
||||
|
||||
//获取指定member的排名
|
||||
// 获取指定member的排名
|
||||
func (m *RedisModule) ZScore(key string, member interface{}) (float64, error) {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
@@ -1022,7 +1021,7 @@ func (m *RedisModule) ZScore(key string, member interface{}) (float64, error) {
|
||||
return redis.Float64(reply, err)
|
||||
}
|
||||
|
||||
//获取指定member的排名
|
||||
// 获取指定member的排名
|
||||
func (m *RedisModule) ZRank(key string, member interface{}, ascend bool) (int, error) {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
@@ -1100,17 +1099,17 @@ func (m *RedisModule) HincrbyHashInt(redisKey, hashKey string, value int) error
|
||||
func (m *RedisModule) EXPlREInsert(key string, TTl int) error {
|
||||
conn, err := m.getConn()
|
||||
if err != nil {
|
||||
return err
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
|
||||
_, err = conn.Do("expire", key, TTl)
|
||||
if err != nil {
|
||||
log.Error("expire fail,reason:%v", err)
|
||||
return err
|
||||
log.Error("expire fail,reason:%v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *RedisModule) Zremrangebyrank(redisKey string, start, end interface{}) (int, error) {
|
||||
conn, err := m.getConn()
|
||||
@@ -1151,3 +1150,9 @@ func (m *RedisModule) Keys(key string) ([]string, error) {
|
||||
}
|
||||
return strs, nil
|
||||
}
|
||||
|
||||
func (m *RedisModule) OnRelease() {
|
||||
if m.redisPool != nil {
|
||||
m.redisPool.Close()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/duanhf2012/origin/util/uuid"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
@@ -17,13 +16,13 @@ import (
|
||||
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
var DefaultReadTimeout time.Duration = time.Second*10
|
||||
var DefaultWriteTimeout time.Duration = time.Second*10
|
||||
var DefaultProcessTimeout time.Duration = time.Second*10
|
||||
var DefaultReadTimeout time.Duration = time.Second * 10
|
||||
var DefaultWriteTimeout time.Duration = time.Second * 10
|
||||
var DefaultProcessTimeout time.Duration = time.Second * 10
|
||||
|
||||
//http redirect
|
||||
type HttpRedirectData struct {
|
||||
Url string
|
||||
Url string
|
||||
CookieList []*http.Cookie
|
||||
}
|
||||
|
||||
@@ -44,7 +43,7 @@ type routerMatchData struct {
|
||||
}
|
||||
|
||||
type routerServeFileData struct {
|
||||
matchUrl string
|
||||
matchUrl string
|
||||
localPath string
|
||||
method HTTP_METHOD
|
||||
}
|
||||
@@ -56,45 +55,45 @@ type IHttpRouter interface {
|
||||
|
||||
SetServeFile(method HTTP_METHOD, urlpath string, dirname string) error
|
||||
SetFormFileKey(formFileKey string)
|
||||
GetFormFileKey()string
|
||||
GetFormFileKey() string
|
||||
AddHttpFiltrate(FiltrateFun HttpFiltrate) bool
|
||||
}
|
||||
|
||||
type HttpRouter struct {
|
||||
pathRouter map[HTTP_METHOD] map[string] routerMatchData //url地址,对应本service地址
|
||||
serveFileData map[string] *routerServeFileData
|
||||
httpFiltrateList [] HttpFiltrate
|
||||
pathRouter map[HTTP_METHOD]map[string]routerMatchData //url地址,对应本service地址
|
||||
serveFileData map[string]*routerServeFileData
|
||||
httpFiltrateList []HttpFiltrate
|
||||
|
||||
formFileKey string
|
||||
}
|
||||
|
||||
type HttpSession struct {
|
||||
httpRouter IHttpRouter
|
||||
r *http.Request
|
||||
w http.ResponseWriter
|
||||
r *http.Request
|
||||
w http.ResponseWriter
|
||||
|
||||
//parse result
|
||||
mapParam map[string]string
|
||||
body []byte
|
||||
body []byte
|
||||
|
||||
//processor result
|
||||
statusCode int
|
||||
msg []byte
|
||||
fileData *routerServeFileData
|
||||
statusCode int
|
||||
msg []byte
|
||||
fileData *routerServeFileData
|
||||
redirectData *HttpRedirectData
|
||||
sessionDone chan *HttpSession
|
||||
sessionDone chan *HttpSession
|
||||
}
|
||||
|
||||
|
||||
type HttpService struct {
|
||||
service.Service
|
||||
|
||||
httpServer network.HttpServer
|
||||
postAliasUrl map[HTTP_METHOD] map[string]routerMatchData //url地址,对应本service地址
|
||||
httpRouter IHttpRouter
|
||||
listenAddr string
|
||||
corsHeader *CORSHeader
|
||||
httpServer network.HttpServer
|
||||
postAliasUrl map[HTTP_METHOD]map[string]routerMatchData //url地址,对应本service地址
|
||||
httpRouter IHttpRouter
|
||||
listenAddr string
|
||||
corsHeader *CORSHeader
|
||||
processTimeout time.Duration
|
||||
manualStart bool
|
||||
}
|
||||
|
||||
type HttpFiltrate func(session *HttpSession) bool //true is pass
|
||||
@@ -109,16 +108,20 @@ func (httpService *HttpService) AddFiltrate(FiltrateFun HttpFiltrate) bool {
|
||||
|
||||
func NewHttpHttpRouter() IHttpRouter {
|
||||
httpRouter := &HttpRouter{}
|
||||
httpRouter.pathRouter =map[HTTP_METHOD] map[string] routerMatchData{}
|
||||
httpRouter.serveFileData = map[string] *routerServeFileData{}
|
||||
httpRouter.pathRouter = map[HTTP_METHOD]map[string]routerMatchData{}
|
||||
httpRouter.serveFileData = map[string]*routerServeFileData{}
|
||||
httpRouter.formFileKey = "file"
|
||||
for i:=METHOD_NONE+1;i<METHOD_INVALID;i++{
|
||||
httpRouter.pathRouter[i] = map[string] routerMatchData{}
|
||||
for i := METHOD_NONE + 1; i < METHOD_INVALID; i++ {
|
||||
httpRouter.pathRouter[i] = map[string]routerMatchData{}
|
||||
}
|
||||
|
||||
return httpRouter
|
||||
}
|
||||
|
||||
func (slf *HttpSession) GetRawQuery() string{
|
||||
return slf.r.URL.RawQuery
|
||||
}
|
||||
|
||||
func (slf *HttpSession) Query(key string) (string, bool) {
|
||||
if slf.mapParam == nil {
|
||||
slf.mapParam = make(map[string]string)
|
||||
@@ -137,7 +140,7 @@ func (slf *HttpSession) Query(key string) (string, bool) {
|
||||
return ret, ok
|
||||
}
|
||||
|
||||
func (slf *HttpSession) GetBody() []byte{
|
||||
func (slf *HttpSession) GetBody() []byte {
|
||||
return slf.body
|
||||
}
|
||||
|
||||
@@ -145,19 +148,19 @@ func (slf *HttpSession) GetMethod() HTTP_METHOD {
|
||||
return slf.getMethod(slf.r.Method)
|
||||
}
|
||||
|
||||
func (slf *HttpSession) GetPath() string{
|
||||
return strings.Trim(slf.r.URL.Path,"/")
|
||||
func (slf *HttpSession) GetPath() string {
|
||||
return strings.Trim(slf.r.URL.Path, "/")
|
||||
}
|
||||
|
||||
func (slf *HttpSession) SetHeader(key, value string) {
|
||||
slf.w.Header().Set(key,value)
|
||||
slf.w.Header().Set(key, value)
|
||||
}
|
||||
|
||||
func (slf *HttpSession) AddHeader(key, value string) {
|
||||
slf.w.Header().Add(key,value)
|
||||
slf.w.Header().Add(key, value)
|
||||
}
|
||||
|
||||
func (slf *HttpSession) GetHeader(key string) string{
|
||||
func (slf *HttpSession) GetHeader(key string) string {
|
||||
return slf.r.Header.Get(key)
|
||||
}
|
||||
|
||||
@@ -165,7 +168,7 @@ func (slf *HttpSession) DelHeader(key string) {
|
||||
slf.r.Header.Del(key)
|
||||
}
|
||||
|
||||
func (slf *HttpSession) WriteStatusCode(statusCode int){
|
||||
func (slf *HttpSession) WriteStatusCode(statusCode int) {
|
||||
slf.statusCode = statusCode
|
||||
}
|
||||
|
||||
@@ -173,7 +176,7 @@ func (slf *HttpSession) Write(msg []byte) {
|
||||
slf.msg = msg
|
||||
}
|
||||
|
||||
func (slf *HttpSession) WriteJsonDone(statusCode int,msgJson interface{}) error {
|
||||
func (slf *HttpSession) WriteJsonDone(statusCode int, msgJson interface{}) error {
|
||||
msg, err := json.Marshal(msgJson)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -187,12 +190,12 @@ func (slf *HttpSession) WriteJsonDone(statusCode int,msgJson interface{}) error
|
||||
|
||||
func (slf *HttpSession) flush() {
|
||||
slf.w.WriteHeader(slf.statusCode)
|
||||
if slf.msg!=nil {
|
||||
if slf.msg != nil {
|
||||
slf.w.Write(slf.msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (slf *HttpSession) Done(){
|
||||
func (slf *HttpSession) Done() {
|
||||
slf.sessionDone <- slf
|
||||
}
|
||||
|
||||
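A route handler works entirely through HttpSession: it reads the request (Query/GetBody/GetHeader), sets the result with WriteStatusCode/Write, and hands the session back with Done so ServeHTTP can flush it. A minimal sketch, assuming the HttpHandle type registered via the router is simply func(session *HttpSession) (its exact definition is not shown in this diff), that the code sits alongside HttpService (so the package-level json variable and net/http are available), and that the /hello route is invented:

func helloHandler(session *HttpSession) {
    name, ok := session.Query("name")
    if !ok {
        name = "anonymous"
    }

    body, err := json.Marshal(map[string]string{"hello": name})
    if err != nil {
        session.WriteStatusCode(http.StatusInternalServerError)
        session.Done()
        return
    }

    session.WriteStatusCode(http.StatusOK)
    session.Write(body)
    // Done pushes the session into sessionDone; ServeHTTP then calls flush().
    session.Done()
}

It would be registered on the router created by NewHttpHttpRouter, e.g. httpRouter.POST("/hello", helloHandler) (POST is shown above; a corresponding GET registration would work the same way).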
@@ -219,15 +222,15 @@ func (slf *HttpRouter) analysisRouterUrl(url string) (string, error) {
|
||||
return strings.Trim(url, "/"), nil
|
||||
}
|
||||
|
||||
func (slf *HttpSession) Handle(){
|
||||
slf.httpRouter.Router(slf)
|
||||
func (slf *HttpSession) Handle() {
|
||||
slf.httpRouter.Router(slf)
|
||||
}
|
||||
|
||||
func (slf *HttpRouter) SetFormFileKey(formFileKey string){
|
||||
func (slf *HttpRouter) SetFormFileKey(formFileKey string) {
|
||||
slf.formFileKey = formFileKey
|
||||
}
|
||||
|
||||
func (slf *HttpRouter) GetFormFileKey()string{
|
||||
func (slf *HttpRouter) GetFormFileKey() string {
|
||||
return slf.formFileKey
|
||||
}
|
||||
|
||||
@@ -239,19 +242,19 @@ func (slf *HttpRouter) POST(url string, handle HttpHandle) bool {
|
||||
return slf.regRouter(METHOD_POST, url, handle)
|
||||
}
|
||||
|
||||
func (slf *HttpRouter) regRouter(method HTTP_METHOD, url string, handle HttpHandle) bool{
|
||||
mapRouter,ok := slf.pathRouter[method]
|
||||
if ok == false{
|
||||
func (slf *HttpRouter) regRouter(method HTTP_METHOD, url string, handle HttpHandle) bool {
|
||||
mapRouter, ok := slf.pathRouter[method]
|
||||
if ok == false {
|
||||
return false
|
||||
}
|
||||
|
||||
mapRouter[strings.Trim(url,"/")] = routerMatchData{httpHandle:handle}
|
||||
mapRouter[strings.Trim(url, "/")] = routerMatchData{httpHandle: handle}
|
||||
return true
|
||||
}
|
||||
|
||||
func (slf *HttpRouter) Router(session *HttpSession){
|
||||
if slf.httpFiltrateList!=nil {
|
||||
for _,fun := range slf.httpFiltrateList{
|
||||
func (slf *HttpRouter) Router(session *HttpSession) {
|
||||
if slf.httpFiltrateList != nil {
|
||||
for _, fun := range slf.httpFiltrateList {
|
||||
if fun(session) == false {
|
||||
//session.done()
|
||||
return
|
||||
@@ -288,13 +291,13 @@ func (slf *HttpRouter) Router(session *HttpSession){
|
||||
session.Done()
|
||||
}
|
||||
|
||||
func (httpService *HttpService) HttpEventHandler(ev event.IEvent) {
|
||||
func (httpService *HttpService) HttpEventHandler(ev event.IEvent) {
|
||||
ev.(*event.Event).Data.(*HttpSession).Handle()
|
||||
}
|
||||
|
||||
func (httpService *HttpService) SetHttpRouter(httpRouter IHttpRouter,eventHandler event.IEventHandler) {
|
||||
func (httpService *HttpService) SetHttpRouter(httpRouter IHttpRouter, eventHandler event.IEventHandler) {
|
||||
httpService.httpRouter = httpRouter
|
||||
httpService.RegEventReceiverFunc(event.Sys_Event_Http_Event,eventHandler, httpService.HttpEventHandler)
|
||||
httpService.RegEventReceiverFunc(event.Sys_Event_Http_Event, eventHandler, httpService.HttpEventHandler)
|
||||
}
|
||||
|
||||
func (slf *HttpRouter) SetServeFile(method HTTP_METHOD, urlpath string, dirname string) error {
|
||||
@@ -349,68 +352,84 @@ func (httpService *HttpService) OnInit() error {
|
||||
if iConfig == nil {
|
||||
return fmt.Errorf("%s service config is error!", httpService.GetName())
|
||||
}
|
||||
tcpCfg := iConfig.(map[string]interface{})
|
||||
addr,ok := tcpCfg["ListenAddr"]
|
||||
httpCfg := iConfig.(map[string]interface{})
|
||||
addr, ok := httpCfg["ListenAddr"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("%s service config is error!", httpService.GetName())
|
||||
}
|
||||
var readTimeout time.Duration = DefaultReadTimeout
|
||||
var writeTimeout time.Duration = DefaultWriteTimeout
|
||||
|
||||
if cfgRead,ok := tcpCfg["ReadTimeout"];ok == true {
|
||||
readTimeout = time.Duration(cfgRead.(float64))*time.Millisecond
|
||||
if cfgRead, ok := httpCfg["ReadTimeout"]; ok == true {
|
||||
readTimeout = time.Duration(cfgRead.(float64)) * time.Millisecond
|
||||
}
|
||||
|
||||
if cfgWrite,ok := tcpCfg["WriteTimeout"];ok == true {
|
||||
writeTimeout = time.Duration(cfgWrite.(float64))*time.Millisecond
|
||||
if cfgWrite, ok := httpCfg["WriteTimeout"]; ok == true {
|
||||
writeTimeout = time.Duration(cfgWrite.(float64)) * time.Millisecond
|
||||
}
|
||||
|
||||
if manualStart, ok := httpCfg["ManualStart"]; ok == true {
|
||||
httpService.manualStart = manualStart.(bool)
|
||||
} else {
|
||||
httpService.manualStart = false
|
||||
}
|
||||
|
||||
httpService.processTimeout = DefaultProcessTimeout
|
||||
if cfgProcessTimeout,ok := tcpCfg["ProcessTimeout"];ok == true {
|
||||
httpService.processTimeout = time.Duration(cfgProcessTimeout.(float64))*time.Millisecond
|
||||
if cfgProcessTimeout, ok := httpCfg["ProcessTimeout"]; ok == true {
|
||||
httpService.processTimeout = time.Duration(cfgProcessTimeout.(float64)) * time.Millisecond
|
||||
}
|
||||
|
||||
httpService.httpServer.Init(addr.(string), httpService, readTimeout, writeTimeout)
|
||||
//Set CAFile
|
||||
caFileList,ok := tcpCfg["CAFile"]
|
||||
caFileList, ok := httpCfg["CAFile"]
|
||||
if ok == false {
|
||||
return nil
|
||||
}
|
||||
iCaList := caFileList.([]interface{})
|
||||
var caFile [] network.CAFile
|
||||
for _,i := range iCaList {
|
||||
var caFile []network.CAFile
|
||||
for _, i := range iCaList {
|
||||
mapCAFile := i.(map[string]interface{})
|
||||
c,ok := mapCAFile["Certfile"]
|
||||
if ok == false{
|
||||
c, ok := mapCAFile["Certfile"]
|
||||
if ok == false {
|
||||
continue
|
||||
}
|
||||
k,ok := mapCAFile["Keyfile"]
|
||||
if ok == false{
|
||||
k, ok := mapCAFile["Keyfile"]
|
||||
if ok == false {
|
||||
continue
|
||||
}
|
||||
|
||||
if c.(string)!="" && k.(string)!="" {
|
||||
caFile = append(caFile,network.CAFile{
|
||||
CertFile: c.(string),
|
||||
if c.(string) != "" && k.(string) != "" {
|
||||
caFile = append(caFile, network.CAFile{
|
||||
CertFile: c.(string),
|
||||
Keyfile: k.(string),
|
||||
})
|
||||
}
|
||||
}
|
||||
httpService.httpServer.SetCAFile(caFile)
|
||||
httpService.httpServer.Start()
|
||||
|
||||
if httpService.manualStart == false {
|
||||
httpService.httpServer.Start()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (httpService *HttpService) StartListen() {
|
||||
if httpService.manualStart {
|
||||
httpService.httpServer.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (httpService *HttpService) SetAllowCORS(corsHeader *CORSHeader) {
|
||||
httpService.corsHeader = corsHeader
|
||||
}
|
||||
|
||||
func (httpService *HttpService) ProcessFile(session *HttpSession){
|
||||
func (httpService *HttpService) ProcessFile(session *HttpSession) {
|
||||
uPath := session.r.URL.Path
|
||||
idx := strings.Index(uPath, session.fileData.matchUrl)
|
||||
subPath := strings.Trim(uPath[idx+len(session.fileData.matchUrl):], "/")
|
||||
|
||||
destLocalPath := session.fileData.localPath + "/"+subPath
|
||||
destLocalPath := session.fileData.localPath + "/" + subPath
|
||||
|
||||
switch session.GetMethod() {
|
||||
case METHOD_GET:
|
||||
@@ -454,29 +473,29 @@ func (httpService *HttpService) ProcessFile(session *HttpSession){
|
||||
defer localFd.Close()
|
||||
io.Copy(localFd, resourceFile)
|
||||
session.WriteStatusCode(http.StatusOK)
|
||||
session.Write([]byte(uPath+"/"+fileName))
|
||||
session.Write([]byte(uPath + "/" + fileName))
|
||||
session.flush()
|
||||
}
|
||||
}
|
||||
|
||||
func NewAllowCORSHeader() *CORSHeader{
|
||||
func NewAllowCORSHeader() *CORSHeader {
|
||||
header := &CORSHeader{}
|
||||
header.AllowCORSHeader = map[string][]string{}
|
||||
header.AllowCORSHeader["Access-Control-Allow-Origin"] = []string{"*"}
|
||||
header.AllowCORSHeader["Access-Control-Allow-Methods"] =[]string{ "POST, GET, OPTIONS, PUT, DELETE"}
|
||||
header.AllowCORSHeader["Access-Control-Allow-Methods"] = []string{"POST, GET, OPTIONS, PUT, DELETE"}
|
||||
header.AllowCORSHeader["Access-Control-Allow-Headers"] = []string{"Content-Type"}
|
||||
|
||||
return header
|
||||
}
|
||||
|
||||
func (slf *CORSHeader) AddAllowHeader(key string,val string){
|
||||
slf.AllowCORSHeader["Access-Control-Allow-Headers"] = append(slf.AllowCORSHeader["Access-Control-Allow-Headers"],fmt.Sprintf("%s,%s",key,val))
|
||||
func (slf *CORSHeader) AddAllowHeader(key string, val string) {
|
||||
slf.AllowCORSHeader["Access-Control-Allow-Headers"] = append(slf.AllowCORSHeader["Access-Control-Allow-Headers"], fmt.Sprintf("%s,%s", key, val))
|
||||
}
|
||||
|
||||
func (slf *CORSHeader) copyTo(header http.Header){
|
||||
for k,v := range slf.AllowCORSHeader{
|
||||
for _,val := range v{
|
||||
header.Add(k,val)
|
||||
func (slf *CORSHeader) copyTo(header http.Header) {
|
||||
for k, v := range slf.AllowCORSHeader {
|
||||
for _, val := range v {
|
||||
header.Add(k, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
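NewAllowCORSHeader builds a permissive default header set and copyTo stamps it onto every response once SetAllowCORS has been called. A short wiring sketch (the X-Token / X-UserId names are invented):

// enableCORS would typically be called from a service's OnInit, before any
// traffic arrives. AddAllowHeader appends both names to
// Access-Control-Allow-Headers.
func enableCORS(httpService *HttpService) {
    corsHeader := NewAllowCORSHeader()
    corsHeader.AddAllowHeader("X-Token", "X-UserId")
    httpService.SetAllowCORS(corsHeader)
}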
@@ -491,12 +510,12 @@ func (httpService *HttpService) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
|
||||
session := &HttpSession{sessionDone:make(chan *HttpSession,1),httpRouter:httpService.httpRouter,statusCode:http.StatusOK}
|
||||
session := &HttpSession{sessionDone: make(chan *HttpSession, 1), httpRouter: httpService.httpRouter, statusCode: http.StatusOK}
|
||||
session.r = r
|
||||
session.w = w
|
||||
|
||||
defer r.Body.Close()
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
session.WriteStatusCode(http.StatusGatewayTimeout)
|
||||
session.flush()
|
||||
@@ -504,19 +523,19 @@ func (httpService *HttpService) ServeHTTP(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
session.body = body
|
||||
|
||||
httpService.GetEventHandler().NotifyEvent(&event.Event{Type:event.Sys_Event_Http_Event,Data:session})
|
||||
httpService.GetEventHandler().NotifyEvent(&event.Event{Type: event.Sys_Event_Http_Event, Data: session})
|
||||
ticker := time.NewTicker(httpService.processTimeout)
|
||||
select {
|
||||
case <-ticker.C:
|
||||
session.WriteStatusCode(http.StatusGatewayTimeout)
|
||||
session.flush()
|
||||
break
|
||||
case <- session.sessionDone:
|
||||
if session.fileData!=nil {
|
||||
case <-session.sessionDone:
|
||||
if session.fileData != nil {
|
||||
httpService.ProcessFile(session)
|
||||
}else if session.redirectData!=nil {
|
||||
} else if session.redirectData != nil {
|
||||
session.redirects()
|
||||
}else{
|
||||
} else {
|
||||
session.flush()
|
||||
}
|
||||
}
|
||||
|
||||
229
sysservice/messagequeueservice/CustomerSubscriber.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/duanhf2012/origin/cluster"
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
"github.com/duanhf2012/origin/util/coroutine"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type CustomerSubscriber struct {
|
||||
rpc.IRpcHandler
|
||||
topic string
|
||||
subscriber *Subscriber
|
||||
fromNodeId int
|
||||
callBackRpcMethod string
|
||||
serviceName string
|
||||
StartIndex uint64
|
||||
oneBatchQuantity int32
|
||||
subscribeMethod SubscribeMethod
|
||||
customerId string
|
||||
|
||||
isStop int32 //退出标记
|
||||
}
|
||||
|
||||
const DefaultOneBatchQuantity = 1000
|
||||
|
||||
type SubscribeMethod = int32
|
||||
|
||||
const (
|
||||
MethodCustom SubscribeMethod = 0 //自定义模式,以消费者设置的StartIndex开始获取或订阅
|
||||
MethodLast SubscribeMethod = 1 //Last模式,以该消费者上次记录的位置开始订阅
|
||||
)
|
||||
|
||||
func (cs *CustomerSubscriber) trySetSubscriberBaseInfo(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId int, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
|
||||
cs.subscriber = ss
|
||||
cs.fromNodeId = fromNodeId
|
||||
cs.callBackRpcMethod = callBackRpcMethod
|
||||
//cs.StartIndex = startIndex
|
||||
cs.subscribeMethod = subscribeMethod
|
||||
cs.customerId = customerId
|
||||
cs.StartIndex = startIndex
|
||||
cs.topic = topic
|
||||
cs.IRpcHandler = rpcHandler
|
||||
if oneBatchQuantity == 0 {
|
||||
cs.oneBatchQuantity = DefaultOneBatchQuantity
|
||||
} else {
|
||||
cs.oneBatchQuantity = oneBatchQuantity
|
||||
}
|
||||
|
||||
strRpcMethod := strings.Split(callBackRpcMethod, ".")
|
||||
if len(strRpcMethod) != 2 {
|
||||
err := errors.New("RpcMethod " + callBackRpcMethod + " is error")
|
||||
log.SError(err.Error())
|
||||
return err
|
||||
}
|
||||
cs.serviceName = strRpcMethod[0]
|
||||
|
||||
if cluster.HasService(fromNodeId, cs.serviceName) == false {
|
||||
err := fmt.Errorf("nodeId %d cannot found %s", fromNodeId, cs.serviceName)
|
||||
log.SError(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if cluster.GetCluster().IsNodeConnected(fromNodeId) == false {
|
||||
err := fmt.Errorf("nodeId %d is disconnect", fromNodeId)
|
||||
log.SError(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if startIndex == 0 {
|
||||
now := time.Now()
|
||||
zeroTime := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
|
||||
//fmt.Println(zeroTime.Unix())
|
||||
cs.StartIndex = uint64(zeroTime.Unix() << 32)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// 开始订阅
|
||||
func (cs *CustomerSubscriber) Subscribe(rpcHandler rpc.IRpcHandler, ss *Subscriber, topic string, subscribeMethod SubscribeMethod, customerId string, fromNodeId int, callBackRpcMethod string, startIndex uint64, oneBatchQuantity int32) error {
|
||||
err := cs.trySetSubscriberBaseInfo(rpcHandler, ss, topic, subscribeMethod, customerId, fromNodeId, callBackRpcMethod, startIndex, oneBatchQuantity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cs.subscriber.queueWait.Add(1)
|
||||
coroutine.GoRecover(cs.SubscribeRun, -1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// 取消订阅
|
||||
func (cs *CustomerSubscriber) UnSubscribe() {
|
||||
atomic.StoreInt32(&cs.isStop, 1)
|
||||
}
|
||||
|
||||
func (cs *CustomerSubscriber) LoadLastIndex() {
|
||||
for {
|
||||
if atomic.LoadInt32(&cs.isStop) != 0 {
|
||||
log.SRelease("topic ", cs.topic, " out of subscription")
|
||||
break
|
||||
}
|
||||
|
||||
log.SRelease("customer ", cs.customerId, " start load last index ")
|
||||
lastIndex, ret := cs.subscriber.dataPersist.LoadCustomerIndex(cs.topic, cs.customerId)
|
||||
if ret == true {
|
||||
if lastIndex > 0 {
|
||||
cs.StartIndex = lastIndex
|
||||
} else {
|
||||
//否则直接使用客户端发回来的
|
||||
}
|
||||
log.SRelease("customer ", cs.customerId, " load finish,start index is ", cs.StartIndex)
|
||||
break
|
||||
}
|
||||
|
||||
log.SRelease("customer ", cs.customerId, " load last index is fail...")
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *CustomerSubscriber) SubscribeRun() {
|
||||
defer cs.subscriber.queueWait.Done()
|
||||
log.SRelease("topic ", cs.topic, " start subscription")
|
||||
|
||||
//加载之前的位置
|
||||
if cs.subscribeMethod == MethodLast {
|
||||
cs.LoadLastIndex()
|
||||
}
|
||||
|
||||
for {
|
||||
if atomic.LoadInt32(&cs.isStop) != 0 {
|
||||
log.SRelease("topic ", cs.topic, " out of subscription")
|
||||
break
|
||||
}
|
||||
|
||||
if cs.checkCustomerIsValid() == false {
|
||||
break
|
||||
}
|
||||
|
||||
//todo 检测退出
|
||||
if cs.subscribe() == false {
|
||||
log.SRelease("topic ", cs.topic, " out of subscription")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
//删除订阅关系
|
||||
cs.subscriber.removeCustomer(cs.customerId, cs)
|
||||
log.SRelease("topic ", cs.topic, " unsubscription")
|
||||
}
|
||||
|
||||
func (cs *CustomerSubscriber) subscribe() bool {
|
||||
//先从内存中查找
|
||||
topicData, ret := cs.subscriber.queue.FindData(cs.StartIndex, cs.oneBatchQuantity)
|
||||
if ret == true {
|
||||
cs.publishToCustomer(topicData)
|
||||
return true
|
||||
}
|
||||
|
||||
//从持久化数据中来找
|
||||
topicData = cs.subscriber.dataPersist.FindTopicData(cs.topic, cs.StartIndex, int64(cs.oneBatchQuantity))
|
||||
return cs.publishToCustomer(topicData)
|
||||
}
|
||||
|
||||
func (cs *CustomerSubscriber) checkCustomerIsValid() bool {
|
||||
//1.检查nodeid是否在线,不在线,直接取消订阅
|
||||
if cluster.GetCluster().IsNodeConnected(cs.fromNodeId) == false {
|
||||
return false
|
||||
}
|
||||
|
||||
//2.验证是否有该服务,如果没有则退出
|
||||
if cluster.HasService(cs.fromNodeId, cs.serviceName) == false {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (cs *CustomerSubscriber) publishToCustomer(topicData []TopicData) bool {
|
||||
if cs.checkCustomerIsValid() == false {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(topicData) == 0 {
|
||||
//没有任何数据,等待100毫秒后重试
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
return true
|
||||
}
|
||||
|
||||
//3.发送失败重试发送
|
||||
var dbQueuePublishReq rpc.DBQueuePublishReq
|
||||
var dbQueuePushRes rpc.DBQueuePublishRes
|
||||
dbQueuePublishReq.TopicName = cs.topic
|
||||
cs.subscriber.dataPersist.OnPushTopicDataToCustomer(cs.topic, topicData)
|
||||
for i := 0; i < len(topicData); i++ {
|
||||
dbQueuePublishReq.PushData = append(dbQueuePublishReq.PushData, topicData[i].RawData)
|
||||
}
|
||||
|
||||
for {
|
||||
if atomic.LoadInt32(&cs.isStop) != 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if cs.checkCustomerIsValid() == false {
|
||||
return false
|
||||
}
|
||||
|
||||
//推送数据
|
||||
err := cs.CallNode(cs.fromNodeId, cs.callBackRpcMethod, &dbQueuePublishReq, &dbQueuePushRes)
|
||||
if err != nil {
|
||||
time.Sleep(time.Second * 1)
|
||||
continue
|
||||
}
|
||||
|
||||
//持久化进度
|
||||
endIndex := cs.subscriber.dataPersist.GetIndex(&topicData[len(topicData)-1])
|
||||
cs.StartIndex = endIndex
|
||||
cs.subscriber.dataPersist.PersistIndex(cs.topic, cs.customerId, endIndex)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
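publishToCustomer delivers each batch by calling back into the subscriber's node with CallNode(fromNodeId, callBackRpcMethod, ...), and only advances and persists the customer index after that call succeeds. The consumer side therefore just needs an RPC method matching callBackRpcMethod that accepts the batch. A hedged sketch (the package, service name, method name and decode logic are all assumptions):

package myconsumer

import (
    "github.com/duanhf2012/origin/rpc"
    "github.com/duanhf2012/origin/service"
)

// ItemConsumerService is a hypothetical subscriber-side service; its
// callBackRpcMethod would be "ItemConsumerService.RPC_OnTopicData".
type ItemConsumerService struct {
    service.Service
}

// RPC_OnTopicData receives one batch of raw topic records per callback.
// Returning nil acknowledges the batch; if the call fails, publishToCustomer
// waits a moment and retries the same batch.
func (c *ItemConsumerService) RPC_OnTopicData(req *rpc.DBQueuePublishReq, res *rpc.DBQueuePublishRes) error {
    for _, raw := range req.PushData {
        _ = raw // decode raw (e.g. BSON/JSON) and apply it
    }
    return nil
}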
97
sysservice/messagequeueservice/MemoryQueue.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"github.com/duanhf2012/origin/util/algorithms"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type MemoryQueue struct {
|
||||
subscriber *Subscriber
|
||||
|
||||
topicQueue []TopicData
|
||||
head int32
|
||||
tail int32
|
||||
|
||||
locker sync.RWMutex
|
||||
}
|
||||
|
||||
func (mq *MemoryQueue) Init(cap int32) {
|
||||
mq.topicQueue = make([]TopicData, cap+1)
|
||||
}
|
||||
|
||||
// 从队尾Push数据
|
||||
func (mq *MemoryQueue) Push(topicData *TopicData) bool {
|
||||
mq.locker.Lock()
|
||||
defer mq.locker.Unlock()
|
||||
|
||||
nextPos := (mq.tail + 1) % int32(len(mq.topicQueue))
|
||||
//如果队列满了
|
||||
if nextPos == mq.head {
|
||||
//将队首的数据删除掉
|
||||
mq.head++
|
||||
mq.head = mq.head % int32(len(mq.topicQueue))
|
||||
}
|
||||
|
||||
mq.tail = nextPos
|
||||
mq.topicQueue[mq.tail] = *topicData
|
||||
return true
|
||||
}
|
||||
|
||||
func (mq *MemoryQueue) findData(startPos int32, startIndex uint64, limit int32) ([]TopicData, bool) {
|
||||
//空队列,无数据
|
||||
if mq.head == mq.tail {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
var findStartPos int32
|
||||
var findEndPos int32
|
||||
findStartPos = startPos //(mq.head + 1) % cap(mq.topicQueue)
|
||||
if findStartPos <= mq.tail {
|
||||
findEndPos = mq.tail + 1
|
||||
} else {
|
||||
findEndPos = int32(cap(mq.topicQueue))
|
||||
}
|
||||
|
||||
//二分查找位置
|
||||
pos := int32(algorithms.BiSearch(mq.topicQueue[findStartPos:findEndPos], startIndex, 1))
|
||||
if pos == -1 {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
pos += findStartPos
|
||||
//取得结束位置
|
||||
endPos := limit + pos
|
||||
if endPos > findEndPos {
|
||||
endPos = findEndPos
|
||||
}
|
||||
|
||||
return mq.topicQueue[pos:endPos], true
|
||||
}
|
||||
|
||||
// FindData 返回参数[]TopicData 表示查找到的数据,nil表示无数据。bool表示是否不应该在内存中来查
|
||||
func (mq *MemoryQueue) FindData(startIndex uint64, limit int32) ([]TopicData, bool) {
|
||||
mq.locker.RLock()
|
||||
defer mq.locker.RUnlock()
|
||||
|
||||
//队列为空时,应该从数据库查找
|
||||
if mq.head == mq.tail {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
/*
|
||||
//先判断startIndex是否比第一个元素要大
|
||||
headTopic := (mq.head + 1) % int32(len(mq.topicQueue))
|
||||
//此时需要从持久化数据中取
|
||||
if startIndex+1 > mq.topicQueue[headTopic].Seq {
|
||||
return nil, false
|
||||
}
|
||||
*/
|
||||
|
||||
retData, ret := mq.findData(mq.head+1, startIndex, limit)
|
||||
if mq.head <= mq.tail || ret == true {
|
||||
return retData, true
|
||||
}
|
||||
|
||||
//如果是正常head在后,尾在前,从数组0下标开始找到tail
|
||||
return mq.findData(0, startIndex, limit)
|
||||
}
|
||||
36
sysservice/messagequeueservice/MemoryQueue_test.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type In int
|
||||
|
||||
func (i In) GetValue() int {
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func Test_BiSearch(t *testing.T) {
|
||||
var memQueue MemoryQueue
|
||||
memQueue.Init(5)
|
||||
|
||||
for i := 1; i <= 8; i++ {
|
||||
memQueue.Push(&TopicData{Seq: uint64(i)})
|
||||
}
|
||||
|
||||
startindex := uint64(0)
|
||||
for {
|
||||
retData, ret := memQueue.FindData(startindex+1, 10)
|
||||
fmt.Println(retData, ret)
|
||||
for _, d := range retData {
|
||||
if d.Seq > startindex {
|
||||
startindex = d.Seq
|
||||
}
|
||||
}
|
||||
if ret == false {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
126
sysservice/messagequeueservice/MessageQueueService.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/service"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type QueueDataPersist interface {
|
||||
service.IModule
|
||||
|
||||
OnExit()
|
||||
OnReceiveTopicData(topic string, topicData []TopicData) //当收到推送过来的数据时
|
||||
OnPushTopicDataToCustomer(topic string, topicData []TopicData) //当推送数据到Customer时回调
|
||||
PersistTopicData(topic string, topicData []TopicData, retryCount int) ([]TopicData, bool) //持久化数据,失败则返回false,上层会重复尝试,直到成功,建议在函数中加入次数,超过次数则返回true
|
||||
FindTopicData(topic string, startIndex uint64, limit int64) []TopicData //查找数据,返回nil表示无数据或查找失败
|
||||
LoadCustomerIndex(topic string, customerId string) (uint64, bool) //false时代表获取失败,一般是读取错误,会进行重试。如果不存在时,返回(0,true)
|
||||
GetIndex(topicData *TopicData) uint64 //通过topic数据获取进度索引号
|
||||
PersistIndex(topic string, customerId string, index uint64) //持久化进度索引号
|
||||
}
|
||||
|
||||
type MessageQueueService struct {
|
||||
service.Service
|
||||
|
||||
sync.Mutex
|
||||
mapTopicRoom map[string]*TopicRoom
|
||||
|
||||
queueWait sync.WaitGroup
|
||||
dataPersist QueueDataPersist
|
||||
|
||||
memoryQueueLen int32
|
||||
maxProcessTopicBacklogNum int32 //最大积压的数据量,因为是写入到channel中,然后由协程取出再持久化,不设置有默认值100000
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) OnInit() error {
|
||||
ms.mapTopicRoom = map[string]*TopicRoom{}
|
||||
errC := ms.ReadCfg()
|
||||
if errC != nil {
|
||||
return errC
|
||||
}
|
||||
|
||||
if ms.dataPersist == nil {
|
||||
return errors.New("not setup QueueDataPersist.")
|
||||
}
|
||||
|
||||
_, err := ms.AddModule(ms.dataPersist)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) ReadCfg() error {
|
||||
mapDBServiceCfg, ok := ms.GetService().GetServiceCfg().(map[string]interface{})
|
||||
if ok == false {
|
||||
return fmt.Errorf("MessageQueueService config is error")
|
||||
}
|
||||
|
||||
maxProcessTopicBacklogNum, ok := mapDBServiceCfg["MaxProcessTopicBacklogNum"]
|
||||
if ok == false {
|
||||
ms.maxProcessTopicBacklogNum = DefaultMaxTopicBacklogNum
|
||||
log.SRelease("MaxProcessTopicBacklogNum config is set to the default value of ", maxProcessTopicBacklogNum)
|
||||
} else {
|
||||
ms.maxProcessTopicBacklogNum = int32(maxProcessTopicBacklogNum.(float64))
|
||||
}
|
||||
|
||||
memoryQueueLen, ok := mapDBServiceCfg["MemoryQueueLen"]
|
||||
if ok == false {
|
||||
ms.memoryQueueLen = DefaultMemoryQueueLen
|
||||
log.SRelease("MemoryQueueLen config is set to the default value of ", DefaultMemoryQueueLen)
|
||||
} else {
|
||||
ms.memoryQueueLen = int32(memoryQueueLen.(float64))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) Setup(dataPersist QueueDataPersist) {
|
||||
ms.dataPersist = dataPersist
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) OnRelease() {
|
||||
|
||||
//停止所有的TopicRoom房间
|
||||
ms.Lock()
|
||||
for _, room := range ms.mapTopicRoom {
|
||||
room.Stop()
|
||||
}
|
||||
ms.Unlock()
|
||||
|
||||
//释放时确保所有的协程退出
|
||||
ms.queueWait.Wait()
|
||||
|
||||
//通知持久化对象
|
||||
ms.dataPersist.OnExit()
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) GetTopicRoom(topic string) *TopicRoom {
|
||||
ms.Lock()
|
||||
defer ms.Unlock()
|
||||
topicRoom := ms.mapTopicRoom[topic]
|
||||
if topicRoom != nil {
|
||||
return topicRoom
|
||||
}
|
||||
|
||||
topicRoom = &TopicRoom{}
|
||||
topicRoom.Init(ms.maxProcessTopicBacklogNum, ms.memoryQueueLen, topic, &ms.queueWait, ms.dataPersist)
|
||||
ms.mapTopicRoom[topic] = topicRoom
|
||||
|
||||
return topicRoom
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) RPC_Publish(inParam *rpc.DBQueuePublishReq, outParam *rpc.DBQueuePublishRes) error {
|
||||
|
||||
topicRoom := ms.GetTopicRoom(inParam.TopicName)
|
||||
return topicRoom.Publish(inParam.PushData)
|
||||
}
|
||||
|
||||
func (ms *MessageQueueService) RPC_Subscribe(req *rpc.DBQueueSubscribeReq, res *rpc.DBQueueSubscribeRes) error {
|
||||
topicRoom := ms.GetTopicRoom(req.TopicName)
|
||||
return topicRoom.TopicSubscribe(ms.GetRpcHandler(), req.SubType, int32(req.Method), int(req.FromNodeId), req.RpcMethod, req.TopicName, req.CustomerId, req.StartIndex, req.OneBatchQuantity)
|
||||
}
|
||||
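RPC_Publish and RPC_Subscribe are the two entry points of the queue node; everything else is wired in OnInit, which refuses to start unless a QueueDataPersist has been injected with Setup. A minimal wiring sketch backed by the MongoPersist module below (the constructor function itself is invented; the service's node registration and its service.json entry with MaxProcessTopicBacklogNum/MemoryQueueLen plus Url/DBName/RetryCount are assumed to follow the configuration patterns read by ReadCfg):

// newMessageQueueService builds a queue service backed by MongoPersist. The
// persistence module must be injected before the node starts the service,
// because OnInit returns an error when dataPersist is nil.
func newMessageQueueService() *MessageQueueService {
    msgQueueService := &MessageQueueService{}
    msgQueueService.Setup(&MongoPersist{})
    return msgQueueService
}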
402
sysservice/messagequeueservice/MongoPersist.go
Normal file
@@ -0,0 +1,402 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/service"
|
||||
"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"sunserver/common/util"
|
||||
"time"
|
||||
)
|
||||
|
||||
const MaxDays = 180
|
||||
|
||||
type MongoPersist struct {
|
||||
service.Module
|
||||
mongo mongodbmodule.MongoModule
|
||||
|
||||
url string //连接url
|
||||
dbName string //数据库名称
|
||||
retryCount int //落地数据库重试次数
|
||||
|
||||
topic []TopicData //用于临时缓存
|
||||
}
|
||||
|
||||
const CustomerCollectName = "SysCustomer"
|
||||
|
||||
func (mp *MongoPersist) OnInit() error {
|
||||
if errC := mp.ReadCfg(); errC != nil {
|
||||
return errC
|
||||
}
|
||||
|
||||
err := mp.mongo.Init(mp.url, time.Second*15)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mp.mongo.Start()
|
||||
if err != nil {
|
||||
log.SError("start dbService[", mp.dbName, "], url[", mp.url, "] init error:", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
//添加索引
|
||||
var IndexKey [][]string
|
||||
var keys []string
|
||||
keys = append(keys, "Customer", "Topic")
|
||||
IndexKey = append(IndexKey, keys)
|
||||
s := mp.mongo.TakeSession()
|
||||
if err := s.EnsureUniqueIndex(mp.dbName, CustomerCollectName, IndexKey, true, true); err != nil {
|
||||
log.SError("EnsureUniqueIndex is fail ", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) ReadCfg() error {
|
||||
mapDBServiceCfg, ok := mp.GetService().GetServiceCfg().(map[string]interface{})
|
||||
if ok == false {
|
||||
return fmt.Errorf("MessageQueueService config is error")
|
||||
}
|
||||
|
||||
//parse MsgRouter
|
||||
url, ok := mapDBServiceCfg["Url"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("MessageQueueService config is error")
|
||||
}
|
||||
mp.url = url.(string)
|
||||
|
||||
dbName, ok := mapDBServiceCfg["DBName"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("MessageQueueService config is error")
|
||||
}
|
||||
mp.dbName = dbName.(string)
|
||||
|
||||
//
|
||||
goroutineNum, ok := mapDBServiceCfg["RetryCount"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("MongoPersist config is error")
|
||||
}
|
||||
mp.retryCount = int(goroutineNum.(float64))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) getTopicBuff(limit int) []TopicData {
|
||||
if cap(mp.topic) < limit {
|
||||
mp.topic = make([]TopicData, limit)
|
||||
}
|
||||
|
||||
return mp.topic[:0]
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) OnExit() {
|
||||
}
|
||||
|
||||
// OnReceiveTopicData 当收到推送过来的数据时
|
||||
func (mp *MongoPersist) OnReceiveTopicData(topic string, topicData []TopicData) {
|
||||
//1.收到推送过来的数据,在里面插入_id字段
|
||||
for i := 0; i < len(topicData); i++ {
|
||||
var document bson.D
|
||||
err := bson.Unmarshal(topicData[i].RawData, &document)
|
||||
if err != nil {
|
||||
topicData[i].RawData = nil
|
||||
log.SError(topic, " data Unmarshal is fail ", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
document = append(document, bson.E{Key: "_id", Value: topicData[i].Seq})
|
||||
|
||||
byteRet, err := bson.Marshal(document)
|
||||
if err != nil {
|
||||
topicData[i].RawData = nil
|
||||
log.SError(topic, " data Marshal is fail ", err.Error())
|
||||
continue
|
||||
}
|
||||
topicData[i].ExtendParam = document
|
||||
topicData[i].RawData = byteRet
|
||||
}
|
||||
}
|
||||
|
||||
// OnPushTopicDataToCustomer 当推送数据到Customer时回调
|
||||
func (mp *MongoPersist) OnPushTopicDataToCustomer(topic string, topicData []TopicData) {
|
||||
|
||||
}
|
||||
|
||||
// PersistTopicData 持久化数据
|
||||
func (mp *MongoPersist) persistTopicData(collectionName string, topicData []TopicData, retryCount int) bool {
|
||||
s := mp.mongo.TakeSession()
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
|
||||
var documents []interface{}
|
||||
for _, tData := range topicData {
|
||||
if tData.ExtendParam == nil {
|
||||
continue
|
||||
}
|
||||
documents = append(documents, tData.ExtendParam)
|
||||
}
|
||||
|
||||
_, err := s.Collection(mp.dbName, collectionName).InsertMany(ctx, documents)
|
||||
if err != nil {
|
||||
log.SError("PersistTopicData InsertMany fail,collect name is ", collectionName)
|
||||
|
||||
//失败最大重试数量
|
||||
return retryCount >= mp.retryCount
|
||||
}
|
||||
|
||||
//log.SRelease("+++++++++====", time.Now().UnixNano())
|
||||
return true
|
||||
}
|
||||
|
||||
// PersistTopicData 持久化数据
|
||||
func (mp *MongoPersist) PersistTopicData(topic string, topicData []TopicData, retryCount int) ([]TopicData, bool) {
|
||||
if len(topicData) == 0 {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
preDate := topicData[0].Seq >> 32
|
||||
var findPos int
|
||||
for findPos = 1; findPos < len(topicData); findPos++ {
|
||||
newDate := topicData[findPos].Seq >> 32
|
||||
//说明换天了
|
||||
if preDate != newDate {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
collectName := fmt.Sprintf("%s_%s", topic, mp.GetDateByIndex(topicData[0].Seq))
|
||||
ret := mp.persistTopicData(collectName, topicData[:findPos], retryCount)
|
||||
//如果失败,下次重试
|
||||
if ret == false {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
//如果成功
|
||||
return topicData[findPos:len(topicData)], true
|
||||
}
|
||||
|
||||
// FindTopicData 查找数据
|
||||
func (mp *MongoPersist) findTopicData(topic string, startIndex uint64, limit int64) ([]TopicData, bool) {
|
||||
s := mp.mongo.TakeSession()
|
||||
|
||||
|
||||
condition := bson.D{{Key: "_id", Value: bson.D{{Key: "$gt", Value: startIndex}}}}
|
||||
|
||||
var findOption options.FindOptions
|
||||
findOption.SetLimit(limit)
|
||||
var findOptions []*options.FindOptions
|
||||
findOptions = append(findOptions, &findOption)
|
||||
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
collectName := fmt.Sprintf("%s_%s", topic, mp.GetDateByIndex(startIndex))
|
||||
cursor, err := s.Collection(mp.dbName, collectName).Find(ctx, condition, findOptions...)
|
||||
if err != nil || cursor.Err() != nil {
|
||||
if err == nil {
|
||||
err = cursor.Err()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.SError("find collect name ", topic, " is error:", err.Error())
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var res []interface{}
|
||||
ctxAll, cancelAll := s.GetDefaultContext()
|
||||
defer cancelAll()
|
||||
err = cursor.All(ctxAll, &res)
|
||||
if err != nil {
|
||||
if err != nil {
|
||||
log.SError("find collect name ", topic, " is error:", err.Error())
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
//序列化返回
|
||||
topicBuff := mp.getTopicBuff(int(limit))
|
||||
for i := 0; i < len(res); i++ {
|
||||
rawData, errM := bson.Marshal(res[i])
|
||||
if errM != nil {
|
||||
if errM != nil {
|
||||
log.SError("collect name ", topic, " Marshal is error:", err.Error())
|
||||
return nil, false
|
||||
}
|
||||
continue
|
||||
}
|
||||
topicBuff = append(topicBuff, TopicData{RawData: rawData})
|
||||
}
|
||||
|
||||
return topicBuff, true
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) IsYesterday(startIndex uint64) (bool, string) {
|
||||
timeStamp := int64(startIndex >> 32)
|
||||
|
||||
startTime := time.Unix(timeStamp, 0).AddDate(0, 0, 1)
|
||||
nowTm := time.Now()
|
||||
|
||||
return startTime.Year() == nowTm.Year() && startTime.Month() == nowTm.Month() && startTime.Day() == nowTm.Day(), nowTm.Format("20060102")
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) getCollectCount(topic string, today string) (int64, error) {
|
||||
s := mp.mongo.TakeSession()
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
collectName := fmt.Sprintf("%s_%s", topic, today)
|
||||
count, err := s.Collection(mp.dbName, collectName).EstimatedDocumentCount(ctx)
|
||||
return count, err
|
||||
}
|
||||
|
||||
// FindTopicData 查找数据
|
||||
func (mp *MongoPersist) FindTopicData(topic string, startIndex uint64, limit int64) []TopicData {
|
||||
//某表找不到,一直往前找,找到当前置为止
|
||||
for days := 1; days <= MaxDays; days++ {
|
||||
//是否可以跳天
|
||||
//在换天时,如果记录在其他协程还没insert完成时,因为没查到直接跳到第二天,导致漏掉数据
|
||||
//解决的方法是在换天时,先判断新换的当天有没有记录,有记录时,说明昨天的数据已经插入完成,才可以跳天查询
|
||||
IsJumpDays := true
|
||||
|
||||
//如果是昨天,先判断当天有没有表数据
|
||||
bYesterday, strToday := mp.IsYesterday(startIndex)
|
||||
if bYesterday {
|
||||
count, err := mp.getCollectCount(topic, strToday)
|
||||
if err != nil {
|
||||
//失败时,重新开始
|
||||
log.SError("getCollectCount ",topic,"_",strToday," is fail:",err.Error())
|
||||
return nil
|
||||
}
|
||||
//当天没有记录,则不能跳表,有可能当天还有数据
|
||||
if count == 0 {
|
||||
IsJumpDays = false
|
||||
}
|
||||
}
|
||||
|
||||
//从startIndex开始一直往后查
|
||||
topicData, isSucc := mp.findTopicData(topic, startIndex, limit)
|
||||
//有数据或者数据库出错时返回,返回后,会进行下一轮的查询遍历
|
||||
if len(topicData) > 0 || isSucc == false {
|
||||
return topicData
|
||||
}
|
||||
|
||||
//找不到数据时,判断当前日期是否一致
|
||||
if mp.GetDateByIndex(startIndex) >= mp.GetNowTime() {
|
||||
break
|
||||
}
|
||||
|
||||
//不允许跳天,则直接跳出
|
||||
if IsJumpDays == false {
|
||||
break
|
||||
}
|
||||
|
||||
startIndex = mp.GetNextIndex(startIndex, days)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) GetNowTime() string {
|
||||
now := time.Now()
|
||||
zeroTime := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
|
||||
return zeroTime.Format("20060102")
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) GetDateByIndex(startIndex uint64) string {
|
||||
startTm := int64(startIndex >> 32)
|
||||
return time.Unix(startTm, 0).Format("20060102")
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) GetNextIndex(startIndex uint64, addDay int) uint64 {
|
||||
startTime := time.Unix(int64(startIndex>>32), 0)
|
||||
dateTime := time.Date(startTime.Year(), startTime.Month(), startTime.Day(), 0, 0, 0, 0, startTime.Location())
|
||||
newDateTime := dateTime.AddDate(0, 0, addDay)
|
||||
nextIndex := uint64(newDateTime.Unix()) << 32
|
||||
return nextIndex
|
||||
}
|
||||
|
||||
// LoadCustomerIndex false时代表获取失败,一般是读取错误,会进行重试。如果不存在时,返回(0,true)
|
||||
func (mp *MongoPersist) LoadCustomerIndex(topic string, customerId string) (uint64, bool) {
|
||||
s := mp.mongo.TakeSession()
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
|
||||
condition := bson.D{{Key: "Customer", Value: customerId}, {Key: "Topic", Value: topic}}
|
||||
cursor, err := s.Collection(mp.dbName, CustomerCollectName).Find(ctx, condition)
|
||||
if err != nil {
|
||||
log.SError("Load topic ", topic, " customer ", customerId, " is fail:", err.Error())
|
||||
return 0, false
|
||||
}
|
||||
|
||||
type findRes struct {
|
||||
Index uint64 `bson:"Index,omitempty"`
|
||||
}
|
||||
|
||||
var res []findRes
|
||||
ctxAll, cancelAll := s.GetDefaultContext()
|
||||
defer cancelAll()
|
||||
err = cursor.All(ctxAll, &res)
|
||||
if err != nil {
|
||||
log.SError("Load topic ", topic, " customer ", customerId, " is fail:", err.Error())
|
||||
return 0, false
|
||||
}
|
||||
|
||||
if len(res) == 0 {
|
||||
return 0, true
|
||||
}
|
||||
|
||||
return res[0].Index, true
|
||||
}
|
||||
|
||||
// GetIndex 通过topic数据获取进度索引号
|
||||
func (mp *MongoPersist) GetIndex(topicData *TopicData) uint64 {
|
||||
if topicData.Seq > 0 {
|
||||
return topicData.Seq
|
||||
}
|
||||
|
||||
var document bson.D
|
||||
err := bson.Unmarshal(topicData.RawData, &document)
|
||||
if err != nil {
|
||||
log.SError("GetIndex is fail ", err.Error())
|
||||
return 0
|
||||
}
|
||||
|
||||
for _, e := range document {
|
||||
if e.Key == "_id" {
|
||||
errC, seq := util.ConvertToNumber[uint64](e.Value)
|
||||
if errC != nil {
|
||||
log.Error("value is error:%s,%+v, ", errC.Error(), e.Value)
|
||||
}
|
||||
|
||||
return seq
|
||||
}
|
||||
}
|
||||
return topicData.Seq
|
||||
}
|
||||
|
||||
// PersistIndex 持久化进度索引号
|
||||
func (mp *MongoPersist) PersistIndex(topic string, customerId string, index uint64) {
|
||||
s := mp.mongo.TakeSession()
|
||||
|
||||
condition := bson.D{{Key: "Customer", Value: customerId}, {Key: "Topic", Value: topic}}
|
||||
upsert := bson.M{"Customer": customerId, "Topic": topic, "Index": index}
|
||||
updata := bson.M{"$set": upsert}
|
||||
|
||||
var UpdateOptionsOpts []*options.UpdateOptions
|
||||
UpdateOptionsOpts = append(UpdateOptionsOpts, options.Update().SetUpsert(true))
|
||||
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
ret, err := s.Collection(mp.dbName, CustomerCollectName).UpdateOne(ctx, condition, updata, UpdateOptionsOpts...)
|
||||
fmt.Println(ret)
|
||||
if err != nil {
|
||||
log.SError("PersistIndex fail :", err.Error())
|
||||
}
|
||||
}
|
||||
122
sysservice/messagequeueservice/MongoPersist_test.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var seq uint64
|
||||
var lastTime int64
|
||||
|
||||
func NextSeq(addDays int) uint64 {
|
||||
now := time.Now().AddDate(0, 0, addDays)
|
||||
|
||||
nowSec := now.Unix()
|
||||
if nowSec != lastTime {
|
||||
seq = 0
|
||||
lastTime = nowSec
|
||||
}
|
||||
//必需从1开始,查询时seq>0
|
||||
seq += 1
|
||||
|
||||
return uint64(nowSec)<<32 | uint64(seq)
|
||||
}
|
||||
|
||||
func Test_MongoPersist(t *testing.T) {
|
||||
//1.初始化
|
||||
var mongoPersist MongoPersist
|
||||
mongoPersist.url = "mongodb://admin:123456@192.168.2.15:27017/?minPoolSize=5&maxPoolSize=35&maxIdleTimeMS=30000"
|
||||
mongoPersist.dbName = "MongoPersistTest"
|
||||
mongoPersist.retryCount = 10
|
||||
mongoPersist.OnInit()
|
||||
|
||||
//2.
|
||||
//加载索引
|
||||
index, ret := mongoPersist.LoadCustomerIndex("TestTopic", "TestCustomer")
|
||||
fmt.Println(index, ret)
|
||||
|
||||
now := time.Now()
|
||||
zeroTime := time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, now.Location())
|
||||
//fmt.Println(zeroTime.Unix())
|
||||
startIndex := uint64(zeroTime.Unix()<<32) | 1
|
||||
|
||||
//存储索引
|
||||
mongoPersist.PersistIndex("TestTopic", "TestCustomer", startIndex)
|
||||
|
||||
//加载索引
|
||||
index, ret = mongoPersist.LoadCustomerIndex("TestTopic", "TestCustomer")
|
||||
|
||||
type RowTest struct {
|
||||
Name string `bson:"Name,omitempty"`
|
||||
MapTest map[int]int `bson:"MapTest,omitempty"`
|
||||
Message string `bson:"Message,omitempty"`
|
||||
}
|
||||
|
||||
type RowTest2 struct {
|
||||
Id uint64 `bson:"_id,omitempty"`
|
||||
Name string `bson:"Name,omitempty"`
|
||||
MapTest map[int]int `bson:"MapTest,omitempty"`
|
||||
Message string `bson:"Message,omitempty"`
|
||||
}
|
||||
|
||||
//存档
|
||||
var findStartIndex uint64
|
||||
var topicData []TopicData
|
||||
for i := 1; i <= 1000; i++ {
|
||||
|
||||
var rowTest RowTest
|
||||
rowTest.Name = fmt.Sprintf("Name_%d", i)
|
||||
rowTest.MapTest = make(map[int]int, 1)
|
||||
rowTest.MapTest[i] = i*1000 + i
|
||||
rowTest.Message = fmt.Sprintf("xxxxxxxxxxxxxxxxxx%d", i)
|
||||
byteRet, _ := bson.Marshal(rowTest)
|
||||
|
||||
var dataSeq uint64
|
||||
if i <= 500 {
|
||||
dataSeq = NextSeq(-1)
|
||||
} else {
|
||||
dataSeq = NextSeq(0)
|
||||
}
|
||||
|
||||
topicData = append(topicData, TopicData{RawData: byteRet, Seq: dataSeq})
|
||||
|
||||
if i == 1 {
|
||||
findStartIndex = topicData[0].Seq
|
||||
}
|
||||
}
|
||||
|
||||
mongoPersist.OnReceiveTopicData("TestTopic", topicData)
|
||||
|
||||
for {
|
||||
if len(topicData) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
topicData, ret = mongoPersist.PersistTopicData("TestTopic", topicData, 1)
|
||||
fmt.Println(ret)
|
||||
}
|
||||
|
||||
//
|
||||
for {
|
||||
retTopicData := mongoPersist.FindTopicData("TestTopic", findStartIndex, 300)
|
||||
for i, data := range retTopicData {
|
||||
var rowTest RowTest2
|
||||
bson.Unmarshal(data.RawData, &rowTest)
|
||||
t.Log(rowTest.Name)
|
||||
|
||||
if i == len(retTopicData)-1 {
|
||||
findStartIndex = mongoPersist.GetIndex(&data)
|
||||
}
|
||||
}
|
||||
|
||||
t.Log("..................")
|
||||
if len(retTopicData) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
//t.Log(mongoPersist.GetIndex(&retTopicData[0]))
|
||||
//t.Log(mongoPersist.GetIndex(&retTopicData[len(retTopicData)-1]))
|
||||
}
|
||||
91
sysservice/messagequeueservice/Subscriber.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
|
||||
"sync"
|
||||
)
|
||||
|
||||
// 订阅器
|
||||
type Subscriber struct {
|
||||
customerLocker sync.RWMutex
|
||||
mapCustomer map[string]*CustomerSubscriber
|
||||
queue MemoryQueue
|
||||
dataPersist QueueDataPersist //对列数据处理器
|
||||
queueWait *sync.WaitGroup
|
||||
}
|
||||
|
||||
func (ss *Subscriber) Init(memoryQueueCap int32) {
|
||||
ss.queue.Init(memoryQueueCap)
|
||||
ss.mapCustomer = make(map[string]*CustomerSubscriber, 5)
|
||||
}
|
||||
|
||||
func (ss *Subscriber) PushTopicDataToQueue(topic string, topics []TopicData) {
|
||||
for i := 0; i < len(topics); i++ {
|
||||
ss.queue.Push(&topics[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (ss *Subscriber) PersistTopicData(topic string, topics []TopicData, retryCount int) ([]TopicData, bool) {
|
||||
return ss.dataPersist.PersistTopicData(topic, topics, retryCount)
|
||||
}
|
||||
|
||||
func (ss *Subscriber) TopicSubscribe(rpcHandler rpc.IRpcHandler, subScribeType rpc.SubscribeType, subscribeMethod SubscribeMethod, fromNodeId int, callBackRpcMethod string, topic string, customerId string, StartIndex uint64, oneBatchQuantity int32) error {
|
||||
//取消订阅时
|
||||
if subScribeType == rpc.SubscribeType_Unsubscribe {
|
||||
ss.UnSubscribe(customerId)
|
||||
return nil
|
||||
} else {
|
||||
ss.customerLocker.Lock()
|
||||
customerSubscriber, ok := ss.mapCustomer[customerId]
|
||||
if ok == true {
|
||||
//已经订阅过,则取消订阅
|
||||
customerSubscriber.UnSubscribe()
|
||||
delete(ss.mapCustomer, customerId)
|
||||
}
|
||||
|
||||
//不存在,则订阅
|
||||
customerSubscriber = &CustomerSubscriber{}
|
||||
ss.mapCustomer[customerId] = customerSubscriber
|
||||
ss.customerLocker.Unlock()
|
||||
|
||||
err := customerSubscriber.Subscribe(rpcHandler, ss, topic, subscribeMethod, customerId, fromNodeId, callBackRpcMethod, StartIndex, oneBatchQuantity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ok == true {
|
||||
log.SRelease("repeat subscription for customer ", customerId)
|
||||
} else {
|
||||
log.SRelease("subscription for customer ", customerId)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *Subscriber) UnSubscribe(customerId string) {
|
||||
ss.customerLocker.RLock()
|
||||
defer ss.customerLocker.RUnlock()
|
||||
|
||||
customerSubscriber, ok := ss.mapCustomer[customerId]
|
||||
if ok == false {
|
||||
log.SWarning("failed to unsubscribe customer " + customerId)
|
||||
return
|
||||
}
|
||||
|
||||
customerSubscriber.UnSubscribe()
|
||||
}
|
||||
|
||||
func (ss *Subscriber) removeCustomer(customerId string, cs *CustomerSubscriber) {
|
||||
|
||||
ss.customerLocker.Lock()
|
||||
//确保删掉是当前的关系。有可能在替换订阅时,将该customer替换的情况
|
||||
customer, _ := ss.mapCustomer[customerId]
|
||||
if customer == cs {
|
||||
delete(ss.mapCustomer, customerId)
|
||||
}
|
||||
ss.customerLocker.Unlock()
|
||||
}
|
||||
146
sysservice/messagequeueservice/TopicRoom.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package messagequeueservice
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/util/coroutine"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TopicData struct {
|
||||
Seq uint64 //序号
|
||||
RawData []byte //原始数据
|
||||
|
||||
ExtendParam interface{} //扩展参数
|
||||
}
|
||||
|
||||
func (t TopicData) GetValue() uint64 {
|
||||
return t.Seq
|
||||
}
|
||||
|
||||
var topicFullError = errors.New("topic room is full")
|
||||
|
||||
const DefaultOnceProcessTopicDataNum = 1024 //一次处理的topic数量,考虑批量落地的数量
|
||||
const DefaultMaxTopicBacklogNum = 100000 //处理的channel最大数量
|
||||
const DefaultMemoryQueueLen = 50000 //内存的最大长度
|
||||
const maxTryPersistNum = 3000 //最大重试次数,约>5分钟
|
||||
|
||||
type TopicRoom struct {
|
||||
topic string //主题名称
|
||||
channelTopic chan TopicData //主题push过来待处理的数据
|
||||
|
||||
Subscriber //订阅器
|
||||
|
||||
//序号生成
|
||||
seq uint32
|
||||
lastTime int64
|
||||
|
||||
//onceProcessTopicDataNum int //一次处理的订阅数据最大量,方便订阅器Subscriber和QueueDataProcessor批量处理
|
||||
StagingBuff []TopicData
|
||||
|
||||
isStop int32
|
||||
}
|
||||
|
||||
// maxProcessTopicBacklogNum:主题最大积压数量
|
||||
func (tr *TopicRoom) Init(maxTopicBacklogNum int32, memoryQueueLen int32, topic string, queueWait *sync.WaitGroup, dataPersist QueueDataPersist) {
|
||||
if maxTopicBacklogNum == 0 {
|
||||
maxTopicBacklogNum = DefaultMaxTopicBacklogNum
|
||||
}
|
||||
|
||||
tr.channelTopic = make(chan TopicData, maxTopicBacklogNum)
|
||||
tr.topic = topic
|
||||
tr.dataPersist = dataPersist
|
||||
tr.queueWait = queueWait
|
||||
tr.StagingBuff = make([]TopicData, DefaultOnceProcessTopicDataNum)
|
||||
tr.queueWait.Add(1)
|
||||
tr.Subscriber.Init(memoryQueueLen)
|
||||
coroutine.GoRecover(tr.topicRoomRun, -1)
|
||||
}
|
||||
|
||||
func (tr *TopicRoom) Publish(data [][]byte) error {
|
||||
if len(tr.channelTopic)+len(data) > cap(tr.channelTopic) {
|
||||
return topicFullError
|
||||
}
|
||||
|
||||
//生成有序序号
|
||||
for _, rawData := range data {
|
||||
tr.channelTopic <- TopicData{RawData: rawData, Seq: tr.NextSeq()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
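Publish rejects a batch with topicFullError when the pending channel would overflow, so callers should treat a full topic room as back-pressure. A minimal usage sketch, assuming room is an already initialized *TopicRoom:

	// room: an initialized *TopicRoom (assumption)
	if err := room.Publish([][]byte{[]byte("msg-1"), []byte("msg-2")}); err != nil {
		// The backlog channel is full; retry later or drop, depending on the business rule.
		log.SWarning("publish to topic failed: ", err.Error())
	}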
|
||||
|
||||
func (tr *TopicRoom) NextSeq() uint64 {
|
||||
now := time.Now()
|
||||
|
||||
nowSec := now.Unix()
|
||||
if nowSec != tr.lastTime {
|
||||
tr.seq = 0
|
||||
tr.lastTime = nowSec
|
||||
}
|
||||
//必需从1开始,查询时seq>0
|
||||
tr.seq += 1
|
||||
|
||||
return uint64(nowSec)<<32 | uint64(tr.seq)
|
||||
}
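NextSeq packs the Unix second into the high 32 bits and a per-second counter (starting at 1, so seq > 0) into the low 32 bits. A quick sketch of how a consumer could split the value back apart; decodeSeq is a hypothetical helper name:

	// decodeSeq splits a TopicData.Seq produced by NextSeq back into its parts.
	func decodeSeq(seq uint64) (sec int64, counter uint32) {
		sec = int64(seq >> 32)             // Unix seconds when the message was published
		counter = uint32(seq & 0xFFFFFFFF) // per-second counter, starts at 1
		return
	}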
|
||||
|
||||
func (tr *TopicRoom) Stop() {
|
||||
atomic.StoreInt32(&tr.isStop, 1)
|
||||
}
|
||||
|
||||
func (tr *TopicRoom) topicRoomRun() {
|
||||
defer tr.queueWait.Done()
|
||||
|
||||
log.SRelease("topic room ", tr.topic, " is running..")
|
||||
for {
|
||||
if atomic.LoadInt32(&tr.isStop) != 0 {
|
||||
break
|
||||
}
|
||||
stagingBuff := tr.StagingBuff[:0]
|
||||
|
||||
for i := 0; i < len(tr.channelTopic) && i < DefaultOnceProcessTopicDataNum; i++ {
|
||||
topicData := <-tr.channelTopic
|
||||
|
||||
stagingBuff = append(stagingBuff, topicData)
|
||||
}
|
||||
tr.Subscriber.dataPersist.OnReceiveTopicData(tr.topic, stagingBuff)
|
||||
//持久化与放内存
|
||||
if len(stagingBuff) == 0 {
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
continue
|
||||
}
|
||||
|
||||
//如果落地失败,最大重试maxTryPersistNum次数
|
||||
var ret bool
|
||||
for j := 0; j < maxTryPersistNum; {
|
||||
//持久化处理
|
||||
stagingBuff, ret = tr.PersistTopicData(tr.topic, stagingBuff, j+1)
|
||||
//如果存档成功,并且有后续批次,则继续存档
|
||||
if ret == true && len(stagingBuff) > 0 {
|
||||
//二次存档不计次数
|
||||
continue
|
||||
}
|
||||
|
||||
//计数增加一次,并且等待100ms,继续重试
|
||||
j += 1
|
||||
if ret == false {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
continue
|
||||
}
|
||||
|
||||
tr.PushTopicDataToQueue(tr.topic, stagingBuff)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
//将所有的订阅取消
|
||||
tr.customerLocker.Lock()
|
||||
for _, customer := range tr.mapCustomer {
|
||||
customer.UnSubscribe()
|
||||
}
|
||||
tr.customerLocker.Unlock()
|
||||
|
||||
log.SRelease("topic room ", tr.topic, " is stop")
|
||||
}
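Init starts topicRoomRun on a recovered goroutine and registers with the shared WaitGroup, so a clean shutdown is Stop followed by waiting on that group. A minimal lifecycle sketch, assuming persist is an application-provided QueueDataPersist implementation:

	var queueWait sync.WaitGroup

	room := &TopicRoom{}
	// persist: some QueueDataPersist implementation supplied by the application (assumption).
	// Passing 0 for the backlog falls back to DefaultMaxTopicBacklogNum.
	room.Init(0, DefaultMemoryQueueLen, "chat", &queueWait, persist)

	// ... Publish / TopicSubscribe while the service is running ...

	room.Stop()      // topicRoomRun drains, cancels all subscriptions and exits
	queueWait.Wait() // returns once every topic room has called Done()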
|
||||
sysservice/rankservice/MongodbPersist.go (new file, 415 lines)
@@ -0,0 +1,415 @@
|
||||
package rankservice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
"github.com/duanhf2012/origin/service"
|
||||
"github.com/duanhf2012/origin/sysmodule/mongodbmodule"
|
||||
"github.com/duanhf2012/origin/util/coroutine"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
const batchRemoveNum = 128 //一次删除的最大数量
|
||||
|
||||
// RankDataDB 排行表数据
|
||||
type RankDataDB struct {
|
||||
Id uint64 `bson:"_id,omitempty"`
|
||||
RefreshTime int64 `bson:"RefreshTime,omitempty"`
|
||||
SortData []int64 `bson:"SortData,omitempty"`
|
||||
Data []byte `bson:"Data,omitempty"`
|
||||
}
|
||||
|
||||
// MongoPersist持久化Module
|
||||
type MongoPersist struct {
|
||||
service.Module
|
||||
mongo mongodbmodule.MongoModule
|
||||
|
||||
url string //Mongodb连接url
|
||||
dbName string //数据库名称
|
||||
SaveInterval time.Duration //落地数据库时间间隔
|
||||
|
||||
sync.Mutex
|
||||
mapRemoveRankData map[uint64]map[uint64]struct{} //将要删除的排行数据 map[RankId]map[Key]struct{}
|
||||
mapUpsertRankData map[uint64]map[uint64]RankData //需要upsert的排行数据 map[RankId][key]RankData
|
||||
|
||||
mapRankSkip map[uint64]IRankSkip //所有的排行榜对象map[RankId]IRankSkip
|
||||
maxRetrySaveCount int //存档重试次数
|
||||
retryTimeIntervalMs time.Duration //重试时间间隔
|
||||
|
||||
lastSaveTime time.Time //最后一次存档时间
|
||||
|
||||
stop int32 //是否停服
|
||||
waitGroup sync.WaitGroup //等待停服
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) OnInit() error {
|
||||
mp.mapRemoveRankData = map[uint64]map[uint64]struct{}{}
|
||||
mp.mapUpsertRankData = map[uint64]map[uint64]RankData{}
|
||||
mp.mapRankSkip = map[uint64]IRankSkip{}
|
||||
|
||||
if errC := mp.ReadCfg(); errC != nil {
|
||||
return errC
|
||||
}
|
||||
|
||||
//初始化MongoDB
|
||||
err := mp.mongo.Init(mp.url, time.Second*15)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//开始运行
|
||||
err = mp.mongo.Start()
|
||||
if err != nil {
|
||||
log.SError("start dbService[", mp.dbName, "], url[", mp.url, "] init error:", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
//开启协程
|
||||
coroutine.GoRecover(mp.persistCoroutine,-1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) ReadCfg() error {
|
||||
mapDBServiceCfg, ok := mp.GetService().GetServiceCfg().(map[string]interface{})
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService config is error")
|
||||
}
|
||||
|
||||
//读取数据库配置
|
||||
saveMongoCfg,ok := mapDBServiceCfg["SaveMongo"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo config is error")
|
||||
}
|
||||
|
||||
mongodbCfg,ok := saveMongoCfg.(map[string]interface{})
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo config is error")
|
||||
}
|
||||
|
||||
url, ok := mongodbCfg["Url"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo.Url config is error")
|
||||
}
|
||||
mp.url = url.(string)
|
||||
|
||||
dbName, ok := mongodbCfg["DBName"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo.DBName config is error")
|
||||
}
|
||||
mp.dbName = dbName.(string)
|
||||
|
||||
saveInterval, ok := mongodbCfg["SaveIntervalMs"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo.SaveIntervalMs config is error")
|
||||
}
|
||||
|
||||
mp.SaveInterval = time.Duration(saveInterval.(float64))*time.Millisecond
|
||||
|
||||
maxRetrySaveCount, ok := mongodbCfg["MaxRetrySaveCount"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo.MaxRetrySaveCount config is error")
|
||||
}
|
||||
mp.maxRetrySaveCount = int(maxRetrySaveCount.(float64))
|
||||
|
||||
retryTimeIntervalMs, ok := mongodbCfg["RetryTimeIntervalMs"]
|
||||
if ok == false {
|
||||
return fmt.Errorf("RankService.SaveMongo.RetryTimeIntervalMs config is error")
|
||||
}
|
||||
mp.retryTimeIntervalMs = time.Duration(retryTimeIntervalMs.(float64))*time.Millisecond
|
||||
|
||||
return nil
|
||||
}
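ReadCfg expects a SaveMongo object inside the RankService entry of the service configuration; every key below is required, and the values shown here are examples only:

	"SaveMongo": {
		"Url": "mongodb://127.0.0.1:27017",
		"DBName": "game",
		"SaveIntervalMs": 5000,
		"MaxRetrySaveCount": 3,
		"RetryTimeIntervalMs": 1000
	}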
|
||||
|
||||
//启服从数据库加载
|
||||
func (mp *MongoPersist) OnStart() {
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) OnSetupRank(manual bool,rankSkip *RankSkip) error{
|
||||
if mp.mapRankSkip == nil {
|
||||
mp.mapRankSkip = map[uint64]IRankSkip{}
|
||||
}
|
||||
|
||||
mp.mapRankSkip[rankSkip.GetRankID()] = rankSkip
|
||||
if manual == true {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.SRelease("start load rank ",rankSkip.GetRankName()," from mongodb.")
|
||||
err := mp.loadFromDB(rankSkip.GetRankID(),rankSkip.GetRankName())
|
||||
	if err != nil {
		log.SError("load from db failed: ", err.Error())
		return err
	}
|
||||
log.SRelease("finish load rank ",rankSkip.GetRankName()," from mongodb.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) loadFromDB(rankId uint64,rankCollectName string) error{
|
||||
s := mp.mongo.TakeSession()
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
|
||||
condition := bson.D{}
|
||||
cursor, err := s.Collection(mp.dbName, rankCollectName).Find(ctx, condition)
|
||||
if err != nil {
|
||||
log.SError("find collect name ", rankCollectName, " is error:", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
	if cursor.Err() != nil {
		log.SError("find collect name ", rankCollectName, " is error:", cursor.Err().Error())
		return cursor.Err()
	}
|
||||
|
||||
rankSkip := mp.mapRankSkip[rankId]
|
||||
	if rankSkip == nil {
		err = fmt.Errorf("rank %s is not setup", rankCollectName)
		log.SError(err.Error())
		return err
	}
|
||||
|
||||
defer cursor.Close(ctx)
|
||||
for cursor.Next(ctx) {
|
||||
var rankDataDB RankDataDB
|
||||
err = cursor.Decode(&rankDataDB)
|
||||
if err != nil {
|
||||
log.SError(" collect name ", rankCollectName, " Decode is error:", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
var rankData rpc.RankData
|
||||
rankData.Data = rankDataDB.Data
|
||||
rankData.Key = rankDataDB.Id
|
||||
rankData.SortData = rankDataDB.SortData
|
||||
|
||||
//更新到排行榜
|
||||
rankSkip.UpsetRank(&rankData,rankDataDB.RefreshTime,true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) lazyInitRemoveMap(rankId uint64){
|
||||
if mp.mapRemoveRankData[rankId] == nil {
|
||||
mp.mapRemoveRankData[rankId] = make(map[uint64]struct{},256)
|
||||
}
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) lazyInitUpsertMap(rankId uint64){
|
||||
if mp.mapUpsertRankData[rankId] == nil {
|
||||
mp.mapUpsertRankData[rankId] = make(map[uint64]RankData,256)
|
||||
}
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) OnEnterRank(rankSkip IRankSkip, enterData *RankData) {
	mp.Lock()
	defer mp.Unlock()

	//先删掉该key在当前排行榜下的待删除记录(内层map以key为键)
	delete(mp.mapRemoveRankData[rankSkip.GetRankID()], enterData.Key)

	mp.lazyInitUpsertMap(rankSkip.GetRankID())
	mp.mapUpsertRankData[rankSkip.GetRankID()][enterData.Key] = *enterData
}
|
||||
|
||||
|
||||
func (mp *MongoPersist) OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) {
	mp.Lock()
	defer mp.Unlock()

	//先删掉更新中的数据(内层map以key为键)
	delete(mp.mapUpsertRankData[rankSkip.GetRankID()], leaveData.Key)

	mp.lazyInitRemoveMap(rankSkip.GetRankID())
	mp.mapRemoveRankData[rankSkip.GetRankID()][leaveData.Key] = struct{}{}
}
|
||||
|
||||
func (mp *MongoPersist) OnChangeRankData(rankSkip IRankSkip, changeData *RankData) {
	mp.Lock()
	defer mp.Unlock()

	//先删掉要删除的数据(内层map以key为键)
	delete(mp.mapRemoveRankData[rankSkip.GetRankID()], changeData.Key)

	//更新数据
	mp.lazyInitUpsertMap(rankSkip.GetRankID())
	mp.mapUpsertRankData[rankSkip.GetRankID()][changeData.Key] = *changeData
}
|
||||
|
||||
//停存持久化到DB
|
||||
func (mp *MongoPersist) OnStop(mapRankSkip map[uint64]*RankSkip){
|
||||
atomic.StoreInt32(&mp.stop,1)
|
||||
mp.waitGroup.Wait()
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) JugeTimeoutSave() bool{
|
||||
timeout := time.Now()
|
||||
isTimeOut := timeout.Sub(mp.lastSaveTime) >= mp.SaveInterval
|
||||
if isTimeOut == true {
|
||||
mp.lastSaveTime = timeout
|
||||
}
|
||||
|
||||
return isTimeOut
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) persistCoroutine(){
|
||||
mp.waitGroup.Add(1)
|
||||
defer mp.waitGroup.Done()
|
||||
for atomic.LoadInt32(&mp.stop)==0 || mp.hasPersistData(){
|
||||
//间隔时间sleep
|
||||
time.Sleep(time.Second*1)
|
||||
|
||||
//没有持久化数据continue
|
||||
if mp.hasPersistData() == false {
|
||||
continue
|
||||
}
|
||||
|
||||
if mp.JugeTimeoutSave() == false{
|
||||
continue
|
||||
}
|
||||
|
||||
//存档数据到数据库
|
||||
mp.saveToDB()
|
||||
}
|
||||
|
||||
//退出时存一次档
|
||||
mp.saveToDB()
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) hasPersistData() bool{
|
||||
mp.Lock()
|
||||
defer mp.Unlock()
|
||||
|
||||
return len(mp.mapUpsertRankData)>0 || len(mp.mapRemoveRankData) >0
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) saveToDB(){
|
||||
//1.copy数据
|
||||
mp.Lock()
|
||||
mapRemoveRankData := mp.mapRemoveRankData
|
||||
mapUpsertRankData := mp.mapUpsertRankData
|
||||
mp.mapRemoveRankData = map[uint64]map[uint64]struct{}{}
|
||||
mp.mapUpsertRankData = map[uint64]map[uint64]RankData{}
|
||||
mp.Unlock()
|
||||
|
||||
//2.存档
|
||||
for len(mapUpsertRankData) > 0 {
|
||||
mp.upsertRankDataToDB(mapUpsertRankData)
|
||||
}
|
||||
|
||||
for len(mapRemoveRankData) >0 {
|
||||
mp.removeRankDataToDB(mapRemoveRankData)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) removeToDB(collectName string,keys []uint64) error{
|
||||
s := mp.mongo.TakeSession()
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
|
||||
condition := bson.D{{Key: "_id", Value: bson.M{"$in": keys}}}
|
||||
|
||||
_, err := s.Collection(mp.dbName, collectName).DeleteMany(ctx, condition)
|
||||
if err != nil {
|
||||
log.SError("MongoPersist DeleteMany fail,collect name is ", collectName)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) removeRankData(rankId uint64,keys []uint64) bool {
|
||||
rank := mp.mapRankSkip[rankId]
|
||||
if rank== nil {
|
||||
log.SError("cannot find rankId ",rankId,"config")
|
||||
return false
|
||||
}
|
||||
|
||||
//不成功则重试maxRetrySaveCount次
|
||||
for i:=0;i<mp.maxRetrySaveCount;i++{
|
||||
if mp.removeToDB(rank.GetRankName(),keys)!= nil {
|
||||
time.Sleep(mp.retryTimeIntervalMs)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) upsertToDB(collectName string,rankData *RankData) error{
|
||||
condition := bson.D{{"_id", rankData.Key}}
|
||||
upsert := bson.M{"_id":rankData.Key,"RefreshTime": rankData.refreshTimestamp, "SortData": rankData.SortData, "Data": rankData.Data}
|
||||
update := bson.M{"$set": upsert}
|
||||
|
||||
s := mp.mongo.TakeSession()
|
||||
ctx, cancel := s.GetDefaultContext()
|
||||
defer cancel()
|
||||
|
||||
updateOpts := options.Update().SetUpsert(true)
|
||||
_, err := s.Collection(mp.dbName, collectName).UpdateOne(ctx, condition,update,updateOpts)
|
||||
if err != nil {
|
||||
log.SError("MongoPersist upsertDB fail,collect name is ", collectName)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) upsertRankDataToDB(mapUpsertRankData map[uint64]map[uint64]RankData) error{
|
||||
for rankId,mapRankData := range mapUpsertRankData{
|
||||
rank,ok := mp.mapRankSkip[rankId]
|
||||
if ok == false {
|
||||
log.SError("cannot find rankId ",rankId,",config is error")
|
||||
delete(mapUpsertRankData,rankId)
|
||||
continue
|
||||
}
|
||||
|
||||
for key,rankData := range mapRankData{
|
||||
//最大重试mp.maxRetrySaveCount次
|
||||
for i:=0;i<mp.maxRetrySaveCount;i++{
|
||||
err := mp.upsertToDB(rank.GetRankName(),&rankData)
|
||||
if err != nil {
|
||||
time.Sleep(mp.retryTimeIntervalMs)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
//存完删掉指定key
|
||||
delete(mapRankData,key)
|
||||
}
|
||||
|
||||
if len(mapRankData) == 0 {
|
||||
delete(mapUpsertRankData,rankId)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mp *MongoPersist) removeRankDataToDB(mapRemoveRankData map[uint64]map[uint64]struct{}) {
|
||||
for rankId ,mapRemoveKey := range mapRemoveRankData{
|
||||
//每100个一删
|
||||
keyList := make([]uint64,0,batchRemoveNum)
|
||||
for key := range mapRemoveKey {
|
||||
delete(mapRemoveKey,key)
|
||||
keyList = append(keyList,key)
|
||||
if len(keyList) >= batchRemoveNum {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
mp.removeRankData(rankId,keyList)
|
||||
|
||||
//如果删完,删掉rankid下所有
|
||||
if len(mapRemoveKey) == 0 {
|
||||
delete(mapRemoveRankData,rankId)
|
||||
}
|
||||
}
|
||||
|
||||
}
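MongoPersist satisfies the IRankModule callbacks defined in RankInterface.go, so persisting ranks to MongoDB is just a matter of installing the module before the service initializes. A minimal sketch, written as if inside package rankservice; registering the service with the node afterwards follows the framework's usual pattern and is omitted:

	rankService := &RankService{}
	// Install the MongoDB persistence module; OnInit will read the SaveMongo
	// config and start the background save coroutine.
	rankService.SetupRankModule(&MongoPersist{})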
|
||||
sysservice/rankservice/RankData.go (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
package rankservice
|
||||
|
||||
import (
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
"github.com/duanhf2012/origin/util/algorithms/skip"
|
||||
"github.com/duanhf2012/origin/util/sync"
|
||||
)
|
||||
|
||||
var emptyRankData RankData
|
||||
|
||||
var RankDataPool = sync.NewPoolEx(make(chan sync.IPoolData, 10240), func() sync.IPoolData {
|
||||
var newRankData RankData
|
||||
return &newRankData
|
||||
})
|
||||
|
||||
type RankData struct {
|
||||
*rpc.RankData
|
||||
refreshTimestamp int64 //刷新时间
|
||||
//bRelease bool
|
||||
ref bool
|
||||
compareFunc func(other skip.Comparator) int
|
||||
}
|
||||
|
||||
func NewRankData(isDec bool, data *rpc.RankData,refreshTimestamp int64) *RankData {
|
||||
ret := RankDataPool.Get().(*RankData)
|
||||
ret.compareFunc = ret.ascCompare
|
||||
if isDec {
|
||||
ret.compareFunc = ret.desCompare
|
||||
}
|
||||
ret.RankData = data
|
||||
ret.refreshTimestamp = refreshTimestamp
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func ReleaseRankData(rankData *RankData) {
|
||||
RankDataPool.Put(rankData)
|
||||
}
|
||||
|
||||
func (p *RankData) Reset() {
|
||||
*p = emptyRankData
|
||||
}
|
||||
|
||||
func (p *RankData) IsRef() bool {
|
||||
return p.ref
|
||||
}
|
||||
|
||||
func (p *RankData) Ref() {
|
||||
p.ref = true
|
||||
}
|
||||
|
||||
func (p *RankData) UnRef() {
|
||||
p.ref = false
|
||||
}
|
||||
|
||||
func (p *RankData) Compare(other skip.Comparator) int {
|
||||
return p.compareFunc(other)
|
||||
}
|
||||
|
||||
func (p *RankData) GetKey() uint64 {
|
||||
return p.Key
|
||||
}
|
||||
|
||||
func (p *RankData) ascCompare(other skip.Comparator) int {
|
||||
otherRankData := other.(*RankData)
|
||||
|
||||
if otherRankData.Key == p.Key {
|
||||
return 0
|
||||
}
|
||||
|
||||
retFlg := compareMoreThan(p.SortData, otherRankData.SortData)
|
||||
if retFlg == 0 {
|
||||
if p.Key > otherRankData.Key {
|
||||
retFlg = 1
|
||||
} else {
|
||||
retFlg = -1
|
||||
}
|
||||
}
|
||||
return retFlg
|
||||
}
|
||||
|
||||
func (p *RankData) desCompare(other skip.Comparator) int {
|
||||
otherRankData := other.(*RankData)
|
||||
|
||||
if otherRankData.Key == p.Key {
|
||||
return 0
|
||||
}
|
||||
|
||||
retFlg := compareMoreThan(otherRankData.SortData, p.SortData)
|
||||
if retFlg == 0 {
|
||||
if p.Key > otherRankData.Key {
|
||||
retFlg = -1
|
||||
} else {
|
||||
retFlg = 1
|
||||
}
|
||||
}
|
||||
return retFlg
|
||||
}
|
||||
sysservice/rankservice/RankDataExpire.go (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
package rankservice
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"github.com/duanhf2012/origin/util/sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var expireDataPool = sync.NewPoolEx(make(chan sync.IPoolData, 10240), func() sync.IPoolData {
|
||||
return &ExpireData{}
|
||||
})
|
||||
|
||||
type ExpireData struct {
|
||||
Index int
|
||||
Key uint64
|
||||
RefreshTimestamp int64
|
||||
ref bool
|
||||
}
|
||||
|
||||
type rankDataHeap struct {
|
||||
rankDatas []*ExpireData
|
||||
expireMs int64
|
||||
mapExpireData map[uint64]*ExpireData
|
||||
}
|
||||
|
||||
var expireData ExpireData
|
||||
func (ed *ExpireData) Reset(){
|
||||
*ed = expireData
|
||||
}
|
||||
|
||||
func (ed *ExpireData) IsRef() bool{
|
||||
return ed.ref
|
||||
}
|
||||
|
||||
func (ed *ExpireData) Ref(){
|
||||
ed.ref = true
|
||||
}
|
||||
|
||||
func (ed *ExpireData) UnRef(){
|
||||
ed.ref = false
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) Init(maxRankDataCount int32,expireMs time.Duration){
|
||||
rd.rankDatas = make([]*ExpireData,0,maxRankDataCount)
|
||||
rd.expireMs = int64(expireMs)
|
||||
rd.mapExpireData = make(map[uint64]*ExpireData,512)
|
||||
heap.Init(rd)
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) Len() int {
|
||||
return len(rd.rankDatas)
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) Less(i, j int) bool {
|
||||
return rd.rankDatas[i].RefreshTimestamp < rd.rankDatas[j].RefreshTimestamp
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) Swap(i, j int) {
|
||||
rd.rankDatas[i], rd.rankDatas[j] = rd.rankDatas[j], rd.rankDatas[i]
|
||||
rd.rankDatas[i].Index,rd.rankDatas[j].Index = i,j
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) Push(x interface{}) {
|
||||
ed := x.(*ExpireData)
|
||||
ed.Index = len(rd.rankDatas)
|
||||
rd.rankDatas = append(rd.rankDatas,ed)
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) Pop() (ret interface{}) {
|
||||
l := len(rd.rankDatas)
|
||||
var retData *ExpireData
|
||||
rd.rankDatas, retData = rd.rankDatas[:l-1], rd.rankDatas[l-1]
|
||||
retData.Index = -1
|
||||
ret = retData
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) PopExpireKey() uint64{
|
||||
if rd.Len() <= 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
if rd.rankDatas[0].RefreshTimestamp+rd.expireMs > time.Now().UnixNano() {
|
||||
return 0
|
||||
}
|
||||
|
||||
rankData := heap.Pop(rd).(*ExpireData)
|
||||
delete(rd.mapExpireData,rankData.Key)
|
||||
|
||||
return rankData.Key
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) PushOrRefreshExpireKey(key uint64,refreshTimestamp int64){
|
||||
//1.先删掉之前的
|
||||
expData ,ok := rd.mapExpireData[key]
|
||||
if ok == true {
|
||||
expData.RefreshTimestamp = refreshTimestamp
|
||||
heap.Fix(rd,expData.Index)
|
||||
return
|
||||
}
|
||||
|
||||
//2.直接插入
|
||||
expData = expireDataPool.Get().(*ExpireData)
|
||||
expData.Key = key
|
||||
expData.RefreshTimestamp = refreshTimestamp
|
||||
rd.mapExpireData[key] = expData
|
||||
|
||||
heap.Push(rd,expData)
|
||||
}
|
||||
|
||||
func (rd *rankDataHeap) RemoveExpireKey(key uint64){
|
||||
expData ,ok := rd.mapExpireData[key]
|
||||
if ok == false {
|
||||
return
|
||||
}
|
||||
|
||||
delete(rd.mapExpireData,key)
|
||||
heap.Remove(rd,expData.Index)
|
||||
expireDataPool.Put(expData)
|
||||
}
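rankDataHeap is a min-heap ordered by RefreshTimestamp with an index map so a key can be refreshed in O(log n); timestamps are nanoseconds, matching the UnixNano comparison in PopExpireKey. A small usage sketch (with "time" imported):

	var expire rankDataHeap
	// Track up to 1024 keys and expire them 10 minutes after their last refresh.
	expire.Init(1024, 10*time.Minute)

	// Insert or refresh a key with the current time (nanoseconds).
	expire.PushOrRefreshExpireKey(10001, time.Now().UnixNano())

	// Drain whatever has expired; PopExpireKey returns 0 when the oldest entry is still fresh.
	for key := expire.PopExpireKey(); key != 0; key = expire.PopExpireKey() {
		// remove `key` from the owning rank here
	}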
|
||||
|
||||
|
||||
|
||||
|
||||
sysservice/rankservice/RankFunc.go (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
package rankservice
|
||||
|
||||
func transformLevel(level int32) interface{} {
|
||||
switch level {
|
||||
case 8:
|
||||
return uint8(0)
|
||||
case 16:
|
||||
return uint16(0)
|
||||
case 32:
|
||||
return uint32(0)
|
||||
case 64:
|
||||
return uint64(0)
|
||||
default:
|
||||
return uint32(0)
|
||||
}
|
||||
}
|
||||
|
||||
func compareIsEqual(firstSortData, secondSortData []int64) bool {
|
||||
firstLen := len(firstSortData)
|
||||
if firstLen != len(secondSortData) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := firstLen - 1; i >= 0; i-- {
|
||||
if firstSortData[i] != secondSortData[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func compareMoreThan(firstSortData, secondSortData []int64) int {
|
||||
firstLen := len(firstSortData)
|
||||
secondLen := len(secondSortData)
|
||||
minLen := firstLen
|
||||
if firstLen > secondLen {
|
||||
minLen = secondLen
|
||||
}
|
||||
|
||||
for i := 0; i < minLen; i++ {
|
||||
if firstSortData[i] > secondSortData[i] {
|
||||
return 1
|
||||
}
|
||||
|
||||
if firstSortData[i] < secondSortData[i] {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
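compareMoreThan compares the sort data element by element, so a multi-field SortData behaves like a tuple: the first field dominates and later fields only break ties. A quick sketch in package rankservice (with "fmt" imported):

	func demoCompareMoreThan() {
		fmt.Println(compareMoreThan([]int64{100, 3}, []int64{90, 9}))  // 1: higher first field wins outright
		fmt.Println(compareMoreThan([]int64{100, 3}, []int64{100, 9})) // -1: first fields tie, second decides
		fmt.Println(compareMoreThan([]int64{100, 3}, []int64{100, 3})) // 0: fully equal, RankData then falls back to Key ordering
	}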
|
||||
sysservice/rankservice/RankInterface.go (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
package rankservice
|
||||
|
||||
import (
|
||||
"github.com/duanhf2012/origin/service"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
)
|
||||
|
||||
type RankDataChangeType int8
|
||||
|
||||
type IRankSkip interface {
|
||||
GetRankID() uint64
|
||||
GetRankName() string
|
||||
GetRankLen() uint64
|
||||
UpsetRank(upsetData *rpc.RankData,refreshTimestamp int64,fromLoad bool) RankDataChangeType
|
||||
}
|
||||
|
||||
type IRankModule interface {
|
||||
service.IModule
|
||||
|
||||
|
||||
OnSetupRank(manual bool,rankSkip *RankSkip) error //当完成安装排行榜对象时
|
||||
OnStart() //服务开启时回调
|
||||
OnEnterRank(rankSkip IRankSkip, enterData *RankData) //进入排行
|
||||
OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) //离开排行
|
||||
OnChangeRankData(rankSkip IRankSkip, changeData *RankData) //当排行数据变化时
|
||||
OnStop(mapRankSkip map[uint64]*RankSkip) //服务结束时回调
|
||||
}
|
||||
|
||||
type DefaultRankModule struct {
	service.Module
}

func (dr *DefaultRankModule) OnSetupRank(manual bool, rankSkip *RankSkip) error {
	return nil
}

func (dr *DefaultRankModule) OnStart() {
}

func (dr *DefaultRankModule) OnEnterRank(rankSkip IRankSkip, enterData *RankData) {
}

func (dr *DefaultRankModule) OnLeaveRank(rankSkip IRankSkip, leaveData *RankData) {
}

func (dr *DefaultRankModule) OnChangeRankData(rankSkip IRankSkip, changeData *RankData) {
}

func (dr *DefaultRankModule) OnStop(mapRankSkip map[uint64]*RankSkip) {
}
|
||||
sysservice/rankservice/RankService.go (new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
package rankservice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/duanhf2012/origin/log"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
"github.com/duanhf2012/origin/service"
|
||||
"time"
|
||||
)
|
||||
|
||||
const PreMapRankSkipLen = 10
|
||||
type RankService struct {
|
||||
service.Service
|
||||
|
||||
mapRankSkip map[uint64]*RankSkip
|
||||
rankModule IRankModule
|
||||
}
|
||||
|
||||
func (rs *RankService) OnInit() error {
|
||||
if rs.rankModule != nil {
|
||||
_, err := rs.AddModule(rs.rankModule)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
	} else {
		//未安装模块时使用默认模块,避免后续回调空指针
		rs.rankModule = &DefaultRankModule{}
		rs.AddModule(rs.rankModule)
	}
|
||||
|
||||
rs.mapRankSkip = make(map[uint64]*RankSkip, PreMapRankSkipLen)
|
||||
err := rs.dealCfg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *RankService) OnStart() {
|
||||
rs.rankModule.OnStart()
|
||||
}
|
||||
|
||||
func (rs *RankService) OnRelease() {
|
||||
rs.rankModule.OnStop(rs.mapRankSkip)
|
||||
}
|
||||
|
||||
// 安装排行模块
|
||||
func (rs *RankService) SetupRankModule(rankModule IRankModule) {
|
||||
rs.rankModule = rankModule
|
||||
}
|
||||
|
||||
// RPC_ManualAddRankSkip 提供手动添加排行榜
|
||||
func (rs *RankService) RPC_ManualAddRankSkip(addInfo *rpc.AddRankList, addResult *rpc.RankResult) error {
|
||||
for _, addRankListData := range addInfo.AddList {
|
||||
		if addRankListData.RankId == 0 {
			return fmt.Errorf("RPC_ManualAddRankSkip must have a rank id")
		}
|
||||
|
||||
//重复的排行榜信息不允许添加
|
||||
rank := rs.mapRankSkip[addRankListData.RankId]
|
||||
if rank != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
newSkip := NewRankSkip(addRankListData.RankId,addRankListData.RankName,addRankListData.IsDec, transformLevel(addRankListData.SkipListLevel), addRankListData.MaxRank,time.Duration(addRankListData.ExpireMs)*time.Millisecond)
|
||||
newSkip.SetupRankModule(rs.rankModule)
|
||||
|
||||
rs.mapRankSkip[addRankListData.RankId] = newSkip
|
||||
rs.rankModule.OnSetupRank(true,newSkip)
|
||||
}
|
||||
|
||||
addResult.AddCount = 1
|
||||
return nil
|
||||
}
|
||||
|
||||
// RPC_UpsetRank 更新排行榜
|
||||
func (rs *RankService) RPC_UpsetRank(upsetInfo *rpc.UpsetRankData, upsetResult *rpc.RankResult) error {
|
||||
rankSkip, ok := rs.mapRankSkip[upsetInfo.RankId]
|
||||
	if ok == false || rankSkip == nil {
		return fmt.Errorf("RPC_UpsetRank[%d] no this rank id", upsetInfo.RankId)
	}
|
||||
|
||||
addCount, updateCount := rankSkip.UpsetRankList(upsetInfo.RankDataList)
|
||||
upsetResult.AddCount = addCount
|
||||
upsetResult.ModifyCount = updateCount
|
||||
return nil
|
||||
}
|
||||
|
||||
// RPC_DeleteRankDataByKey 按key从排行榜中进行删除
|
||||
func (rs *RankService) RPC_DeleteRankDataByKey(delInfo *rpc.DeleteByKey, delResult *rpc.RankResult) error {
|
||||
rankSkip, ok := rs.mapRankSkip[delInfo.RankId]
|
||||
	if ok == false || rankSkip == nil {
		return fmt.Errorf("RPC_DeleteRankDataByKey[%d] no this rank type", delInfo.RankId)
	}
|
||||
|
||||
removeCount := rankSkip.DeleteRankData(delInfo.KeyList)
|
||||
if removeCount == 0 {
|
||||
log.SError("remove count is zero")
|
||||
}
|
||||
|
||||
delResult.RemoveCount = removeCount
|
||||
return nil
|
||||
}
|
||||
|
||||
// RPC_FindRankDataByKey 按key查找,返回对应的排行名次信息
|
||||
func (rs *RankService) RPC_FindRankDataByKey(findInfo *rpc.FindRankDataByKey, findResult *rpc.RankPosData) error {
|
||||
rankObj, ok := rs.mapRankSkip[findInfo.RankId]
|
||||
	if ok == false || rankObj == nil {
		return fmt.Errorf("RPC_FindRankDataByKey[%d] no this rank type", findInfo.RankId)
	}
|
||||
|
||||
findRankData, rank := rankObj.GetRankNodeData(findInfo.Key)
|
||||
if findRankData != nil {
|
||||
findResult.Data = findRankData.Data
|
||||
findResult.Key = findRankData.Key
|
||||
findResult.SortData = findRankData.SortData
|
||||
findResult.Rank = rank
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RPC_FindRankDataByRank 按pos查找
|
||||
func (rs *RankService) RPC_FindRankDataByRank(findInfo *rpc.FindRankDataByRank, findResult *rpc.RankPosData) error {
|
||||
rankObj, ok := rs.mapRankSkip[findInfo.RankId]
|
||||
	if ok == false || rankObj == nil {
		return fmt.Errorf("RPC_FindRankDataByRank[%d] no this rank type", findInfo.RankId)
	}
|
||||
|
||||
findRankData, rankPos := rankObj.GetRankNodeDataByRank(findInfo.Rank)
|
||||
if findRankData != nil {
|
||||
findResult.Data = findRankData.Data
|
||||
findResult.Key = findRankData.Key
|
||||
findResult.SortData = findRankData.SortData
|
||||
findResult.Rank = rankPos
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RPC_FindRankDataList 按StartRank查找,从StartRank开始count个排行数据
|
||||
func (rs *RankService) RPC_FindRankDataList(findInfo *rpc.FindRankDataList, findResult *rpc.RankDataList) error {
|
||||
rankObj, ok := rs.mapRankSkip[findInfo.RankId]
|
||||
if ok == false || rankObj == nil {
|
||||
err := fmt.Errorf("not config rank %d",findInfo.RankId)
|
||||
log.SError(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
findResult.RankDataCount = rankObj.GetRankLen()
|
||||
err := rankObj.GetRankDataFromToLimit(findInfo.StartRank-1, findInfo.Count, findResult)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//查询附带的key
|
||||
if findInfo.Key!= 0 {
|
||||
findRankData, rank := rankObj.GetRankNodeData(findInfo.Key)
|
||||
if findRankData != nil {
|
||||
findResult.KeyRank = &rpc.RankPosData{}
|
||||
findResult.KeyRank.Data = findRankData.Data
|
||||
findResult.KeyRank.Key = findRankData.Key
|
||||
findResult.KeyRank.SortData = findRankData.SortData
|
||||
findResult.KeyRank.Rank = rank
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *RankService) deleteRankList(delIdList []uint64) {
|
||||
if rs.mapRankSkip == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, id := range delIdList {
|
||||
delete(rs.mapRankSkip, id)
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *RankService) dealCfg() error {
|
||||
mapDBServiceCfg, ok := rs.GetServiceCfg().(map[string]interface{})
|
||||
if ok == false {
|
||||
return nil
|
||||
}
|
||||
|
||||
cfgList, okList := mapDBServiceCfg["SortCfg"].([]interface{})
|
||||
if okList == false {
|
||||
return fmt.Errorf("RankService SortCfg must be list")
|
||||
}
|
||||
|
||||
for _, cfg := range cfgList {
|
||||
mapCfg, okCfg := cfg.(map[string]interface{})
|
||||
if okCfg == false {
|
||||
return fmt.Errorf("RankService SortCfg data must be map or struct")
|
||||
}
|
||||
|
||||
rankId, okId := mapCfg["RankID"].(float64)
|
||||
if okId == false || uint64(rankId)==0 {
|
||||
return fmt.Errorf("RankService SortCfg data must has RankID[number]")
|
||||
}
|
||||
|
||||
rankName, okId := mapCfg["RankName"].(string)
|
||||
if okId == false || len(rankName)==0 {
|
||||
return fmt.Errorf("RankService SortCfg data must has RankName[string]")
|
||||
}
|
||||
|
||||
level, _ := mapCfg["SkipListLevel"].(float64)
|
||||
isDec, _ := mapCfg["IsDec"].(bool)
|
||||
maxRank, _ := mapCfg["MaxRank"].(float64)
|
||||
expireMs, _ := mapCfg["ExpireMs"].(float64)
|
||||
|
||||
|
||||
newSkip := NewRankSkip(uint64(rankId),rankName,isDec, transformLevel(int32(level)), uint64(maxRank),time.Duration(expireMs)*time.Millisecond)
|
||||
newSkip.SetupRankModule(rs.rankModule)
|
||||
rs.mapRankSkip[uint64(rankId)] = newSkip
|
||||
err := rs.rankModule.OnSetupRank(false,newSkip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
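dealCfg reads an array named SortCfg from the RankService service configuration; RankID and RankName are mandatory, while SkipListLevel, IsDec, MaxRank and ExpireMs are optional and default to zero values. An illustrative entry (names and values are examples only):

	"SortCfg": [
		{
			"RankID": 1,
			"RankName": "LevelRank",
			"SkipListLevel": 32,
			"IsDec": true,
			"MaxRank": 100,
			"ExpireMs": 0
		}
	]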
|
||||
|
||||
|
||||
sysservice/rankservice/RankSkip.go (new file, 304 lines)
@@ -0,0 +1,304 @@
|
||||
package rankservice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/duanhf2012/origin/rpc"
|
||||
"github.com/duanhf2012/origin/util/algorithms/skip"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RankSkip struct {
|
||||
rankId uint64 //排行榜ID
|
||||
rankName string //排行榜名称
|
||||
isDes bool //是否为降序 true:降序 false:升序
|
||||
skipList *skip.SkipList //跳表
|
||||
mapRankData map[uint64]*RankData //排行数据map
|
||||
maxLen uint64 //排行数据长度
|
||||
expireMs time.Duration //有效时间
|
||||
rankModule IRankModule
|
||||
rankDataExpire rankDataHeap
|
||||
}
|
||||
|
||||
const MaxPickExpireNum = 128
|
||||
const (
|
||||
RankDataNone RankDataChangeType = 0
|
||||
RankDataAdd RankDataChangeType = 1 //数据插入
|
||||
RankDataUpdate RankDataChangeType = 2 //数据更新
|
||||
RankDataDelete RankDataChangeType = 3 //数据删除
|
||||
)
|
||||
|
||||
// NewRankSkip 创建排行榜
|
||||
func NewRankSkip(rankId uint64,rankName string,isDes bool, level interface{}, maxLen uint64,expireMs time.Duration) *RankSkip {
|
||||
rs := &RankSkip{}
|
||||
|
||||
rs.rankId = rankId
|
||||
rs.rankName = rankName
|
||||
rs.isDes = isDes
|
||||
rs.skipList = skip.New(level)
|
||||
rs.mapRankData = make(map[uint64]*RankData, 10240)
|
||||
rs.maxLen = maxLen
|
||||
rs.expireMs = expireMs
|
||||
rs.rankDataExpire.Init(int32(maxLen),expireMs)
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
func (rs *RankSkip) pickExpireKey(){
|
||||
if rs.expireMs == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for i:=1;i<=MaxPickExpireNum;i++{
|
||||
key := rs.rankDataExpire.PopExpireKey()
|
||||
if key == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
rs.DeleteRankData([]uint64{key})
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *RankSkip) SetupRankModule(rankModule IRankModule) {
|
||||
rs.rankModule = rankModule
|
||||
}
|
||||
|
||||
// GetRankID 获取排行榜Id
|
||||
func (rs *RankSkip) GetRankID() uint64 {
|
||||
return rs.rankId
|
||||
}
|
||||
|
||||
// GetRankName 获取排行榜名称
|
||||
func (rs *RankSkip) GetRankName() string {
|
||||
return rs.rankName
|
||||
}
|
||||
|
||||
// GetRankLen 获取排行榜长度
|
||||
func (rs *RankSkip) GetRankLen() uint64 {
|
||||
return rs.skipList.Len()
|
||||
}
|
||||
|
||||
func (rs *RankSkip) UpsetRankList(upsetRankData []*rpc.RankData) (addCount int32, modifyCount int32) {
|
||||
for _, upsetData := range upsetRankData {
|
||||
changeType := rs.UpsetRank(upsetData,time.Now().UnixNano(),false)
|
||||
if changeType == RankDataAdd{
|
||||
addCount+=1
|
||||
} else if changeType == RankDataUpdate{
|
||||
modifyCount+=1
|
||||
}
|
||||
}
|
||||
|
||||
rs.pickExpireKey()
|
||||
return
|
||||
}
|
||||
|
||||
// UpsetRank 更新玩家排行数据,返回变化后的数据及变化类型
|
||||
func (rs *RankSkip) UpsetRank(upsetData *rpc.RankData,refreshTimestamp int64,fromLoad bool) RankDataChangeType {
|
||||
rankNode, ok := rs.mapRankData[upsetData.Key]
|
||||
if ok == true {
|
||||
//找到的情况对比排名数据是否有变化,无变化进行data更新,有变化则进行删除更新
|
||||
if compareIsEqual(rankNode.SortData, upsetData.SortData) {
|
||||
rankNode.Data = upsetData.GetData()
|
||||
rankNode.refreshTimestamp = refreshTimestamp
|
||||
|
||||
if fromLoad == false {
|
||||
rs.rankModule.OnChangeRankData(rs,rankNode)
|
||||
}
|
||||
rs.rankDataExpire.PushOrRefreshExpireKey(upsetData.Key,refreshTimestamp)
|
||||
return RankDataUpdate
|
||||
}
|
||||
|
||||
if upsetData.Data == nil {
|
||||
upsetData.Data = rankNode.Data
|
||||
}
|
||||
rs.skipList.Delete(rankNode)
|
||||
ReleaseRankData(rankNode)
|
||||
|
||||
newRankData := NewRankData(rs.isDes, upsetData,refreshTimestamp)
|
||||
rs.skipList.Insert(newRankData)
|
||||
rs.mapRankData[upsetData.Key] = newRankData
|
||||
|
||||
//刷新有效期
|
||||
rs.rankDataExpire.PushOrRefreshExpireKey(upsetData.Key,refreshTimestamp)
|
||||
|
||||
if fromLoad == false {
|
||||
rs.rankModule.OnChangeRankData(rs, newRankData)
|
||||
}
|
||||
return RankDataUpdate
|
||||
}
|
||||
|
||||
if rs.checkInsertAndReplace(upsetData) {
|
||||
newRankData := NewRankData(rs.isDes, upsetData,refreshTimestamp)
|
||||
rs.skipList.Insert(newRankData)
|
||||
rs.mapRankData[upsetData.Key] = newRankData
|
||||
rs.rankDataExpire.PushOrRefreshExpireKey(upsetData.Key,refreshTimestamp)
|
||||
|
||||
if fromLoad == false {
|
||||
rs.rankModule.OnEnterRank(rs, newRankData)
|
||||
}
|
||||
|
||||
return RankDataAdd
|
||||
}
|
||||
|
||||
return RankDataNone
|
||||
}
|
||||
|
||||
// DeleteRankData 删除排行数据
|
||||
func (rs *RankSkip) DeleteRankData(delKeys []uint64) int32 {
|
||||
var removeRankData int32
|
||||
//预统计处理,进行回调
|
||||
for _, key := range delKeys {
|
||||
rankData, ok := rs.mapRankData[key]
|
||||
if ok == false {
|
||||
continue
|
||||
}
|
||||
|
||||
removeRankData+=1
|
||||
rs.skipList.Delete(rankData)
|
||||
delete(rs.mapRankData, rankData.Key)
|
||||
rs.rankDataExpire.RemoveExpireKey(rankData.Key)
|
||||
rs.rankModule.OnLeaveRank(rs, rankData)
|
||||
ReleaseRankData(rankData)
|
||||
}
|
||||
|
||||
return removeRankData
|
||||
}
|
||||
|
||||
// GetRankNodeData 获取,返回排名节点与名次
|
||||
func (rs *RankSkip) GetRankNodeData(findKey uint64) (*RankData, uint64) {
|
||||
rankNode, ok := rs.mapRankData[findKey]
|
||||
if ok == false {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
rs.pickExpireKey()
|
||||
_, index := rs.skipList.GetWithPosition(rankNode)
|
||||
return rankNode, index+1
|
||||
}
|
||||
|
||||
// GetRankNodeDataByRank 按名次获取,返回排名节点与名次
|
||||
func (rs *RankSkip) GetRankNodeDataByRank(rank uint64) (*RankData, uint64) {
|
||||
rs.pickExpireKey()
|
||||
rankNode := rs.skipList.ByPosition(rank-1)
|
||||
if rankNode == nil {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
return rankNode.(*RankData), rank
|
||||
}
|
||||
|
||||
// GetRankKeyPrevToLimit 获取key前count名的数据
|
||||
func (rs *RankSkip) GetRankKeyPrevToLimit(findKey, count uint64, result *rpc.RankDataList) error {
|
||||
	if rs.GetRankLen() <= 0 {
		return fmt.Errorf("rank[%d] no data", rs.rankId)
	}
|
||||
|
||||
findData, ok := rs.mapRankData[findKey]
|
||||
	if ok == false {
		return fmt.Errorf("rank[%d] no data", rs.rankId)
	}
|
||||
|
||||
_, rankPos := rs.skipList.GetWithPosition(findData)
|
||||
iter := rs.skipList.Iter(findData)
|
||||
iterCount := uint64(0)
|
||||
for iter.Prev() && iterCount < count {
|
||||
rankData := iter.Value().(*RankData)
|
||||
result.RankPosDataList = append(result.RankPosDataList, &rpc.RankPosData{
|
||||
Key: rankData.Key,
|
||||
Rank: rankPos - iterCount+1,
|
||||
SortData: rankData.SortData,
|
||||
Data: rankData.Data,
|
||||
})
|
||||
iterCount++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRankKeyNextToLimit 获取key后count名的数据
|
||||
func (rs *RankSkip) GetRankKeyNextToLimit(findKey, count uint64, result *rpc.RankDataList) error {
|
||||
	if rs.GetRankLen() <= 0 {
		return fmt.Errorf("rank[%d] no data", rs.rankId)
	}
|
||||
|
||||
findData, ok := rs.mapRankData[findKey]
|
||||
	if ok == false {
		return fmt.Errorf("rank[%d] no data", rs.rankId)
	}
|
||||
|
||||
_, rankPos := rs.skipList.GetWithPosition(findData)
|
||||
iter := rs.skipList.Iter(findData)
|
||||
iterCount := uint64(0)
|
||||
for iter.Next() && iterCount < count {
|
||||
rankData := iter.Value().(*RankData)
|
||||
result.RankPosDataList = append(result.RankPosDataList, &rpc.RankPosData{
|
||||
Key: rankData.Key,
|
||||
Rank: rankPos + iterCount+1,
|
||||
SortData: rankData.SortData,
|
||||
Data: rankData.Data,
|
||||
})
|
||||
iterCount++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRankDataFromToLimit 获取排行榜数据,startPos开始的count个数据
|
||||
func (rs *RankSkip) GetRankDataFromToLimit(startPos, count uint64, result *rpc.RankDataList) error {
|
||||
if rs.GetRankLen() <= 0 {
|
||||
//初始排行榜可能没有数据
|
||||
return nil
|
||||
}
|
||||
|
||||
rs.pickExpireKey()
|
||||
if result.RankDataCount < startPos {
|
||||
startPos = result.RankDataCount - 1
|
||||
}
|
||||
|
||||
iter := rs.skipList.IterAtPosition(startPos)
|
||||
iterCount := uint64(0)
|
||||
for iter.Next() && iterCount < count {
|
||||
rankData := iter.Value().(*RankData)
|
||||
result.RankPosDataList = append(result.RankPosDataList, &rpc.RankPosData{
|
||||
Key: rankData.Key,
|
||||
Rank: iterCount + startPos+1,
|
||||
SortData: rankData.SortData,
|
||||
Data: rankData.Data,
|
||||
})
|
||||
iterCount++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkInsertAndReplace 检查是否能插入,排行榜已满时尝试替换最后一名
|
||||
func (rs *RankSkip) checkInsertAndReplace(upsetData *rpc.RankData) bool {
|
||||
//maxLen为0,不限制长度
|
||||
if rs.maxLen == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
//没有放满,则进行插入
|
||||
rankLen := rs.skipList.Len()
|
||||
if rs.maxLen > rankLen {
|
||||
return true
|
||||
}
|
||||
|
||||
//已经放满了,进行数据比较
|
||||
lastPosData := rs.skipList.ByPosition(rankLen - 1)
|
||||
lastRankData := lastPosData.(*RankData)
|
||||
moreThanFlag := compareMoreThan(upsetData.SortData, lastRankData.SortData)
|
||||
//降序排列,比最后一位小,不能插入 升序排列,比最后一位大,不能插入
|
||||
if (rs.isDes == true && moreThanFlag < 0) || (rs.isDes == false && moreThanFlag > 0) || moreThanFlag == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
//移除最后一位
|
||||
//回调模块,该RandData从排行中删除
|
||||
rs.rankDataExpire.RemoveExpireKey(lastRankData.Key)
|
||||
rs.rankModule.OnLeaveRank(rs, lastRankData)
|
||||
rs.skipList.Delete(lastPosData)
|
||||
delete(rs.mapRankData, lastRankData.Key)
|
||||
ReleaseRankData(lastRankData)
|
||||
return true
|
||||
}
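RPC callers normally go through RankService, but RankSkip can also be driven directly, which makes the update/query flow easier to see. A minimal sketch, written as if inside package rankservice (the rpc package is imported) and assuming a rank module whose callbacks are no-ops, such as the DefaultRankModule above:

	// A descending top-100 rank with 32-level skip list keys and no expiry.
	rank := NewRankSkip(1, "LevelRank", true, transformLevel(32), 100, 0)
	rank.SetupRankModule(&DefaultRankModule{}) // no-op callbacks; MongoPersist would persist them instead

	rank.UpsetRankList([]*rpc.RankData{
		{Key: 10001, SortData: []int64{55}},
		{Key: 10002, SortData: []int64{60}},
	})

	var result rpc.RankDataList
	result.RankDataCount = rank.GetRankLen() // mirrors what RPC_FindRankDataList does
	rank.GetRankDataFromToLimit(0, 10, &result)
	// result.RankPosDataList now holds key 10002 at rank 1 and key 10001 at rank 2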
|
||||
|
||||
@@ -21,9 +21,6 @@ type TcpService struct {
|
||||
mapClientLocker sync.RWMutex
|
||||
mapClient map[uint64] *Client
|
||||
process processor.IProcessor
|
||||
|
||||
ReadDeadline time.Duration
|
||||
WriteDeadline time.Duration
|
||||
}
|
||||
|
||||
type TcpPackType int8
|
||||
@@ -34,14 +31,6 @@ const(
|
||||
TPT_UnknownPack TcpPackType = 3
|
||||
)
|
||||
|
||||
const Default_MaxConnNum = 3000
|
||||
const Default_PendingWriteNum = 10000
|
||||
const Default_LittleEndian = false
|
||||
const Default_MinMsgLen = 2
|
||||
const Default_MaxMsgLen = 65535
|
||||
const Default_ReadDeadline = 180 //30s
|
||||
const Default_WriteDeadline = 180 //30s
|
||||
|
||||
const (
|
||||
MaxNodeId = 1<<14 - 1 //最大值 16383
|
||||
MaxSeed = 1<<19 - 1 //最大值 524287
|
||||
@@ -89,14 +78,6 @@ func (tcpService *TcpService) OnInit() error{
|
||||
}
|
||||
|
||||
tcpService.tcpServer.Addr = addr.(string)
|
||||
tcpService.tcpServer.MaxConnNum = Default_MaxConnNum
|
||||
tcpService.tcpServer.PendingWriteNum = Default_PendingWriteNum
|
||||
tcpService.tcpServer.LittleEndian = Default_LittleEndian
|
||||
tcpService.tcpServer.MinMsgLen = Default_MinMsgLen
|
||||
tcpService.tcpServer.MaxMsgLen = Default_MaxMsgLen
|
||||
tcpService.ReadDeadline = Default_ReadDeadline
|
||||
tcpService.WriteDeadline = Default_WriteDeadline
|
||||
|
||||
MaxConnNum,ok := tcpCfg["MaxConnNum"]
|
||||
if ok == true {
|
||||
tcpService.tcpServer.MaxConnNum = int(MaxConnNum.(float64))
|
||||
@@ -120,12 +101,12 @@ func (tcpService *TcpService) OnInit() error{
|
||||
|
||||
readDeadline,ok := tcpCfg["ReadDeadline"]
|
||||
if ok == true {
|
||||
tcpService.ReadDeadline = time.Second*time.Duration(readDeadline.(float64))
|
||||
tcpService.tcpServer.ReadDeadline = time.Second*time.Duration(readDeadline.(float64))
|
||||
}
|
||||
|
||||
writeDeadline,ok := tcpCfg["WriteDeadline"]
|
||||
if ok == true {
|
||||
tcpService.WriteDeadline = time.Second*time.Duration(writeDeadline.(float64))
|
||||
tcpService.tcpServer.WriteDeadline = time.Second*time.Duration(writeDeadline.(float64))
|
||||
}
|
||||
|
||||
tcpService.mapClient = make( map[uint64] *Client, tcpService.tcpServer.MaxConnNum)
|
||||
@@ -195,7 +176,7 @@ func (slf *Client) Run() {
|
||||
break
|
||||
}
|
||||
|
||||
slf.tcpConn.SetReadDeadline(slf.tcpService.ReadDeadline)
|
||||
slf.tcpConn.SetReadDeadline(slf.tcpService.tcpServer.ReadDeadline)
|
||||
bytes,err := slf.tcpConn.ReadMsg()
|
||||
if err != nil {
|
||||
log.SDebug("read client id ",slf.id," is error:",err.Error())
|
||||
@@ -231,7 +212,6 @@ func (tcpService *TcpService) SendMsg(clientId uint64,msg interface{}) error{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client.tcpConn.SetWriteDeadline(tcpService.WriteDeadline)
|
||||
return client.tcpConn.WriteMsg(bytes)
|
||||
}
|
||||
|
||||
@@ -271,7 +251,6 @@ func (tcpService *TcpService) SendRawMsg(clientId uint64,msg []byte) error{
|
||||
return fmt.Errorf("client %d is disconnect!",clientId)
|
||||
}
|
||||
tcpService.mapClientLocker.Unlock()
|
||||
client.tcpConn.SetWriteDeadline(tcpService.WriteDeadline)
|
||||
return client.tcpConn.WriteMsg(msg)
|
||||
}
|
||||
|
||||
@@ -283,7 +262,6 @@ func (tcpService *TcpService) SendRawData(clientId uint64,data []byte) error{
|
||||
return fmt.Errorf("client %d is disconnect!",clientId)
|
||||
}
|
||||
tcpService.mapClientLocker.Unlock()
|
||||
client.tcpConn.SetWriteDeadline(tcpService.WriteDeadline)
|
||||
return client.tcpConn.WriteRawMsg(data)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
package algorithms
|
||||
|
||||
|
||||
type NumberType interface {
|
||||
int | int8 | int16 | int32 | int64 | string | float32 | float64 | uint | uint8 | uint16 | uint32 | uint64
|
||||
}
|
||||
@@ -9,8 +8,16 @@ type Element[ValueType NumberType] interface {
|
||||
GetValue() ValueType
|
||||
}
|
||||
|
||||
//BiSearch 二分查找,切片必需有序号。matchUp表示是否向上范围查找。比如:数列10 20 30 ,当value传入25时,返回结果是2,表示落到3的范围
|
||||
func BiSearch[ValueType NumberType, T Element[ValueType]](sElement []T, value ValueType, matchUp bool) int {
|
||||
/*
|
||||
BiSearch 二分查找,切片必需有序
|
||||
matchUp规则如下:
|
||||
参数为0时,则一定要找到相等的值
|
||||
参数-1时,找value左边的值,例如:[10,20,30,40],当value为9时返回-1; 当value为11时,返回0 当value为41时,返回 3
|
||||
参数 1时,找value右边的值,例如:[10,20,30,40],当value为9时返回 0; 当value为11时,返回1 当value为41时,返回-1
|
||||
|
||||
返回-1时代表没有找到下标
|
||||
*/
|
||||
func BiSearch[ValueType NumberType, T Element[ValueType]](sElement []T, value ValueType, matchUp int) int {
|
||||
low, high := 0, len(sElement)-1
|
||||
if high == -1 {
|
||||
return -1
|
||||
@@ -28,12 +35,31 @@ func BiSearch[ValueType NumberType, T Element[ValueType]](sElement []T, value Va
|
||||
}
|
||||
}
|
||||
|
||||
if matchUp == true {
|
||||
if (sElement[mid].GetValue()) < value &&
|
||||
(mid+1 < len(sElement)-1) {
|
||||
switch matchUp {
|
||||
case 1:
|
||||
if (sElement[mid].GetValue()) < value {
|
||||
if mid+1 >= len(sElement) {
|
||||
return -1
|
||||
}
|
||||
return mid + 1
|
||||
}
|
||||
return mid
|
||||
case -1:
|
||||
if (sElement[mid].GetValue()) > value {
|
||||
if mid-1 < 0 {
|
||||
return -1
|
||||
} else {
|
||||
return mid - 1
|
||||
}
|
||||
} else if (sElement[mid].GetValue()) < value {
|
||||
//if mid+1 < len(sElement)-1 {
|
||||
// return mid + 1
|
||||
//} else {
|
||||
return mid
|
||||
//}
|
||||
} else {
|
||||
return mid
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
|
||||
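A usage sketch of the new BiSearch signature, following the documented matchUp rules above; intElem is a hypothetical wrapper so plain ints satisfy Element[int], and the snippet is written as if inside package algorithms with "fmt" imported:

	type intElem int

	func (e intElem) GetValue() int { return int(e) }

	func demoBiSearch() {
		s := []intElem{10, 20, 30, 40}

		fmt.Println(BiSearch[int, intElem](s, 11, 0))  // -1: matchUp 0 requires an exact match
		fmt.Println(BiSearch[int, intElem](s, 11, -1)) // 0:  nearest element on the left of 11
		fmt.Println(BiSearch[int, intElem](s, 9, -1))  // -1: nothing to the left of 9
		fmt.Println(BiSearch[int, intElem](s, 11, 1))  // 1:  nearest element on the right of 11
		fmt.Println(BiSearch[int, intElem](s, 41, 1))  // -1: nothing to the right of 41
	}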
util/algorithms/BitwiseOperation.go (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
package algorithms
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type BitNumber interface {
|
||||
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | uintptr
|
||||
}
|
||||
|
||||
type UnsignedNumber interface {
|
||||
uint | uint8 | uint16 | uint32 | uint64 | uintptr
|
||||
}
|
||||
|
||||
func getBitTagIndex[Number BitNumber, UNumber UnsignedNumber](bitBuff []Number, bitPositionIndex UNumber) (uintptr, uintptr, bool) {
|
||||
sliceIndex := uintptr(bitPositionIndex) / (8 * unsafe.Sizeof(bitBuff[0]))
|
||||
sliceBitIndex := uintptr(bitPositionIndex) % (8 * unsafe.Sizeof(bitBuff[0]))
|
||||
|
||||
//位index不能越界
|
||||
if uintptr(bitPositionIndex) >= uintptr(len(bitBuff))*unsafe.Sizeof(bitBuff[0])*8 {
|
||||
return 0, 0, false
|
||||
}
|
||||
return sliceIndex, sliceBitIndex, true
|
||||
}
|
||||
|
||||
func setBitTagByIndex[Number BitNumber, UNumber UnsignedNumber](bitBuff []Number, bitPositionIndex UNumber, setTag bool) bool {
|
||||
sliceIndex, sliceBitIndex, ret := getBitTagIndex(bitBuff, bitPositionIndex)
|
||||
if ret == false {
|
||||
return ret
|
||||
}
|
||||
|
||||
if setTag {
|
||||
bitBuff[sliceIndex] = bitBuff[sliceIndex] | 1<<sliceBitIndex
|
||||
} else {
|
||||
bitBuff[sliceIndex] = bitBuff[sliceIndex] &^ (1 << sliceBitIndex)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func GetBitwiseTag[Number BitNumber, UNumber UnsignedNumber](bitBuff []Number, bitPositionIndex UNumber) (bool, error) {
|
||||
sliceIndex, sliceBitIndex, ret := getBitTagIndex(bitBuff, bitPositionIndex)
|
||||
if ret == false {
|
||||
return false, errors.New("Invalid parameter")
|
||||
}
|
||||
|
||||
return (bitBuff[sliceIndex] & (1 << sliceBitIndex)) > 0, nil
|
||||
}
|
||||
|
||||
func SetBitwiseTag[Number BitNumber, UNumber UnsignedNumber](bitBuff []Number, bitPositionIndex UNumber) bool {
|
||||
return setBitTagByIndex(bitBuff, bitPositionIndex, true)
|
||||
}
|
||||
|
||||
func ClearBitwiseTag[Number BitNumber, UNumber UnsignedNumber](bitBuff []Number, bitPositionIndex UNumber) bool {
|
||||
return setBitTagByIndex(bitBuff, bitPositionIndex, false)
|
||||
}
|
||||
|
||||
func GetBitwiseNum[Number BitNumber](bitBuff []Number) int {
|
||||
return len(bitBuff) * int(unsafe.Sizeof(bitBuff[0])*8)
|
||||
}
|
||||
util/algorithms/BitwiseOperation_test.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package algorithms
|
||||
|
||||
import "testing"
|
||||
|
||||
func Test_Bitwise(t *testing.T) {
|
||||
//1.预分配10个byte切片,用于存储位标识
|
||||
byteBuff := make([]byte, 10)
|
||||
|
||||
//2.获取buff总共位数
|
||||
bitNum := GetBitwiseNum(byteBuff)
|
||||
t.Log(bitNum)
|
||||
|
||||
//3..对索引79位打标记,注意是从0开始,79即为最后一个位
|
||||
idx := uint(79)
|
||||
|
||||
//4.对byteBuff索引idx位置打上标记
|
||||
SetBitwiseTag(byteBuff, idx)
|
||||
|
||||
//5.获取索引idx位置标记
|
||||
isTag, ret := GetBitwiseTag(byteBuff, idx)
|
||||
t.Log("set index ", idx, " :", isTag, ret)
|
||||
if isTag != true {
|
||||
t.Fatal("error")
|
||||
}
|
||||
|
||||
//6.清除掉索引idx位标记
|
||||
ClearBitwiseTag(byteBuff, idx)
|
||||
|
||||
//7.获取索引idx位置标记
|
||||
isTag, ret = GetBitwiseTag(byteBuff, idx)
|
||||
t.Log("get index ", idx, " :", isTag, ret)
|
||||
|
||||
if isTag != false {
|
||||
t.Fatal("error")
|
||||
}
|
||||
|
||||
}
|
||||
util/algorithms/skip/interface.go (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
Copyright 2014 Workiva, LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package skip
|
||||
|
||||
// Comparator is a generic interface that represents items that can
|
||||
// be compared.
|
||||
type Comparator interface {
|
||||
// Compare compares this interface with another. Returns a positive
|
||||
// number if this interface is greater, 0 if equal, negative number
|
||||
// if less.
|
||||
Compare(Comparator) int
|
||||
}
|
||||
|
||||
// Comparators is a typed list of type Comparator.
|
||||
type Comparators []Comparator
|
||||
|
||||
// Iterator defines an interface that allows a consumer to iterate
|
||||
// all results of a query. All values will be visited in-order.
|
||||
type Iterator interface {
|
||||
// Next returns a bool indicating if there is future value
|
||||
// in the iterator and moves the iterator to that value.
|
||||
Next() bool
|
||||
// Prev returns a bool indicating if there is Previous value
|
||||
// in the iterator and moves the iterator to that value.
|
||||
Prev() bool
|
||||
// Value returns a Comparator representing the iterator's current
|
||||
// position. If there is no value, this returns nil.
|
||||
Value() Comparator
|
||||
// exhaust is a helper method that will iterate this iterator
|
||||
// to completion and return a list of resulting Entries
|
||||
// in order.
|
||||
exhaust() Comparators
|
||||
}
|
||||
util/algorithms/skip/iterator.go (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
Copyright 2014 Workiva, LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package skip
|
||||
|
||||
const iteratorExhausted = -2
|
||||
|
||||
// iterator represents an object that can be iterated. It will
|
||||
// return false on Next and nil on Value if there are no further
|
||||
// values to be iterated.
|
||||
type iterator struct {
|
||||
first bool
|
||||
n *node
|
||||
}
|
||||
|
||||
// Next returns a bool indicating if there are any further values
|
||||
// in this iterator.
|
||||
func (iter *iterator) Next() bool {
|
||||
if iter.first {
|
||||
iter.first = false
|
||||
return iter.n != nil
|
||||
}
|
||||
|
||||
if iter.n == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
iter.n = iter.n.forward[0]
|
||||
return iter.n != nil
|
||||
}
|
||||
|
||||
// Prev returns a bool indicating if there are any Previous values
|
||||
// in this iterator.
|
||||
func (iter *iterator) Prev() bool {
|
||||
if iter.first {
|
||||
iter.first = false
|
||||
return iter.n != nil
|
||||
}
|
||||
|
||||
if iter.n == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
iter.n = iter.n.preNode
|
||||
return iter.n != nil && iter.n.entry != nil
|
||||
}
|
||||
|
||||
// Value returns a Comparator representing the iterator's present
|
||||
// position in the query. Returns nil if no values remain to iterate.
|
||||
func (iter *iterator) Value() Comparator {
|
||||
if iter.n == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return iter.n.entry
|
||||
}
|
||||
|
||||
// exhaust is a helper method to exhaust this iterator and return
|
||||
// all remaining entries.
|
||||
func (iter *iterator) exhaust() Comparators {
|
||||
entries := make(Comparators, 0, 10)
|
||||
for i := iter; i.Next(); {
|
||||
entries = append(entries, i.Value())
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// nilIterator returns an iterator that will always return false
|
||||
// for Next and nil for Value.
|
||||
func nilIterator() *iterator {
|
||||
return &iterator{}
|
||||
}
|
||||
util/algorithms/skip/node.go (new file, 50 lines)
@@ -0,0 +1,50 @@
/*
Copyright 2014 Workiva, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package skip

type widths []uint64

type nodes []*node

type node struct {
    // forward denotes the forward pointing pointers in this
    // node.
    forward nodes
    // preNode is the previous node at level zero.
    preNode *node
    // widths keeps track of the distance between this pointer
    // and the forward pointers so we can access skip list
    // values by position in logarithmic time.
    widths widths
    // entry is the associated value with this node.
    entry Comparator
}

func (n *node) Compare(e Comparator) int {
    return n.entry.Compare(e)
}

// newNode will allocate and return a new node with the entry
// provided. maxLevels will determine the length of the forward
// pointer list associated with this node.
func newNode(cmp Comparator, maxLevels uint8) *node {
    return &node{
        entry:   cmp,
        forward: make(nodes, maxLevels),
        widths:  make(widths, maxLevels),
    }
}
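The widths field is what makes positional access possible: a search by position accumulates widths instead of comparing entries. A small sketch of the position-based API built on it follows (hedged: the Key type is invented for illustration, and skip.Comparator is assumed to be the single-method Compare interface used by node.Compare above; ByPosition, GetWithPosition and InsertAtPosition are defined in skip.go below).

package main

import (
    "fmt"

    "github.com/duanhf2012/origin/util/algorithms/skip"
)

// Key is a hypothetical comparator used only for illustration.
type Key int

func (k Key) Compare(other skip.Comparator) int {
    return int(k) - int(other.(Key))
}

func main() {
    sl := skip.New(uint32(0))
    sl.Insert(Key(10), Key(20), Key(30))

    // The widths stored on each node let these run in O(log n).
    fmt.Println(sl.ByPosition(1))            // 20 (positions are 0-based)
    fmt.Println(sl.GetWithPosition(Key(30))) // 30 2

    // InsertAtPosition bypasses ordering checks and places the entry at index 1.
    sl.InsertAtPosition(1, Key(15))
    fmt.Println(sl.ByPosition(1)) // 15
}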
494
util/algorithms/skip/skip.go
Normal file
@@ -0,0 +1,494 @@
/*
Copyright 2014 Workiva, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Package skip defines a skiplist datastructure. That is, a data structure
that probabilistically determines relationships between keys. By doing
so, it becomes easier to program than a binary search tree but maintains
similar speeds.

Performance characteristics:
Insert: O(log n)
Search: O(log n)
Delete: O(log n)
Space: O(n)

Recently added is the capability to address, insert, and replace an
entry by position. This capability is achieved by saving the width
of the "gap" between two nodes. Searching for an item by position is
very similar to searching by value in that the same basic algorithm is
used but we are searching for width instead of value. Because this avoids
the overhead associated with Golang interfaces, operations by position
are about twice as fast as operations by value. Time complexities listed
below.

SearchByPosition: O(log n)
InsertByPosition: O(log n)

More information here: http://cglab.ca/~morin/teaching/5408/refs/p90b.pdf

Benchmarks:
BenchmarkInsert-8              2000000   930 ns/op
BenchmarkGet-8                 2000000   989 ns/op
BenchmarkDelete-8              3000000   600 ns/op
BenchmarkPrepend-8             1000000  1468 ns/op
BenchmarkByPosition-8         10000000   202 ns/op
BenchmarkInsertAtPosition-8    3000000   485 ns/op

CPU profiling has shown that the most expensive thing we do here
is call Compare. A potential optimization for gets only is to
do a binary search in the forward/width lists instead of visiting
every value. We could also use generics if Golang had them and
let the consumer specify primitive types, which would speed up
these operations dramatically.
*/
package skip

import (
    "math/rand"
    "sync"
    "sync/atomic"
    "time"
)

const p = .5 // the p level defines the probability that a node
// with a value at level i also has a value at i+1. This number
// is also important in determining max level. Max level will
// be defined as L(N) where L = log base (1/p) of n where n
// is the number of items in the list and N is the number of possible
// items in the universe. If p = .5 then maxlevel = 32 is appropriate
// for uint32.

// lockedSource is an implementation of rand.Source that is safe for
// concurrent use by multiple goroutines. The code is modeled after
// https://golang.org/src/math/rand/rand.go.
type lockedSource struct {
    mu  sync.Mutex
    src rand.Source
}

// Int63 implements the rand.Source interface.
func (ls *lockedSource) Int63() (n int64) {
    ls.mu.Lock()
    n = ls.src.Int63()
    ls.mu.Unlock()
    return
}

// Seed implements the rand.Source interface.
func (ls *lockedSource) Seed(seed int64) {
    ls.mu.Lock()
    ls.src.Seed(seed)
    ls.mu.Unlock()
}

// generator will be the common generator to create random numbers. It
// is seeded with unix nanosecond when this line is executed at runtime,
// and only executed once ensuring all random numbers come from the same
// randomly seeded generator.
var generator = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})

func generateLevel(maxLevel uint8) uint8 {
    var level uint8
    for level = uint8(1); level < maxLevel-1; level++ {
        if generator.Float64() >= p {
            return level
        }
    }

    return level
}

func insertNode(sl *SkipList, n *node, cmp Comparator, pos uint64, cache nodes, posCache widths, allowDuplicate bool) Comparator {
    if !allowDuplicate && n != nil && n.Compare(cmp) == 0 { // a simple update in this case
        oldEntry := n.entry
        n.entry = cmp
        return oldEntry
    }
    atomic.AddUint64(&sl.num, 1)

    nodeLevel := generateLevel(sl.maxLevel)
    if nodeLevel > sl.level {
        for i := sl.level; i < nodeLevel; i++ {
            cache[i] = sl.head
        }
        sl.level = nodeLevel
    }

    nn := newNode(cmp, nodeLevel)
    for i := uint8(0); i < nodeLevel; i++ {
        if i == 0 {
            nn.preNode = cache[i]
            if cache[i].forward[i] != nil {
                cache[i].forward[i].preNode = nn
            }
        }

        nn.forward[i] = cache[i].forward[i]
        cache[i].forward[i] = nn

        formerWidth := cache[i].widths[i]
        if formerWidth == 0 {
            nn.widths[i] = 0
        } else {
            nn.widths[i] = posCache[i] + formerWidth + 1 - pos
        }

        if cache[i].forward[i] != nil {
            cache[i].widths[i] = pos - posCache[i]
        }
    }

    for i := nodeLevel; i < sl.level; i++ {
        if cache[i].forward[i] == nil {
            continue
        }
        cache[i].widths[i]++
    }
    return nil
}

func splitAt(sl *SkipList, index uint64) (*SkipList, *SkipList) {
    right := &SkipList{}
    right.maxLevel = sl.maxLevel
    right.level = sl.level
    right.cache = make(nodes, sl.maxLevel)
    right.posCache = make(widths, sl.maxLevel)
    right.head = newNode(nil, sl.maxLevel)
    sl.searchByPosition(index, sl.cache, sl.posCache) // populate the cache that needs updating

    for i := uint8(0); i <= sl.level; i++ {
        right.head.forward[i] = sl.cache[i].forward[i]
        if sl.cache[i].forward[i] != nil {
            right.head.widths[i] = sl.cache[i].widths[i] - (index - sl.posCache[i])
        }
        sl.cache[i].widths[i] = 0
        sl.cache[i].forward[i] = nil
    }

    right.num = sl.Len() - index // right is not in user's hands yet
    atomic.AddUint64(&sl.num, -right.num)

    sl.resetMaxLevel()
    right.resetMaxLevel()

    return sl, right
}

// SkipList is a data structure that probabilistically determines
// relationships between nodes. This results in a structure
// that performs similarly to a BST but is much easier to build
// from a programmatic perspective (no rotations).
type SkipList struct {
    maxLevel, level uint8
    head            *node
    num             uint64
    // a list of nodes that can be reused, should reduce
    // the number of allocations in the insert/delete case.
    cache    nodes
    posCache widths
}

// init will initialize this skiplist. The parameter is expected
// to be of some uint type which will set this skiplist's maximum
// level.
func (sl *SkipList) init(ifc interface{}) {
    switch ifc.(type) {
    case uint8:
        sl.maxLevel = 8
    case uint16:
        sl.maxLevel = 16
    case uint32:
        sl.maxLevel = 32
    case uint64, uint:
        sl.maxLevel = 64
    }
    sl.cache = make(nodes, sl.maxLevel)
    sl.posCache = make(widths, sl.maxLevel)
    sl.head = newNode(nil, sl.maxLevel)
}

func (sl *SkipList) search(cmp Comparator, update nodes, widths widths) (*node, uint64) {
    if sl.Len() == 0 { // nothing in the list
        return nil, 1
    }

    var pos uint64 = 0
    var offset uint8
    var alreadyChecked *node
    n := sl.head
    for i := uint8(0); i <= sl.level; i++ {
        offset = sl.level - i
        for n.forward[offset] != nil && n.forward[offset] != alreadyChecked && n.forward[offset].Compare(cmp) < 0 {
            pos += n.widths[offset]
            n = n.forward[offset]
        }

        alreadyChecked = n
        if update != nil {
            update[offset] = n
            widths[offset] = pos
        }
    }

    return n.forward[0], pos + 1
}

func (sl *SkipList) resetMaxLevel() {
    if sl.level < 1 {
        sl.level = 1
        return
    }
    for sl.head.forward[sl.level-1] == nil && sl.level > 1 {
        sl.level--
    }
}

func (sl *SkipList) searchByPosition(position uint64, update nodes, widths widths) (*node, uint64) {
    if sl.Len() == 0 { // nothing in the list
        return nil, 1
    }

    if position > sl.Len() {
        return nil, 1
    }

    var pos uint64 = 0
    var offset uint8
    n := sl.head
    for i := uint8(0); i <= sl.level; i++ {
        offset = sl.level - i
        for n.forward[offset] != nil && pos+n.widths[offset] <= position {
            pos += n.widths[offset]
            n = n.forward[offset]
        }

        if update != nil {
            update[offset] = n
            widths[offset] = pos
        }
    }

    return n, pos + 1
}

// Get will retrieve values associated with the keys provided. If an
// associated value could not be found, a nil is returned in its place.
// This is an O(log n) operation.
func (sl *SkipList) Get(comparators ...Comparator) Comparators {
    result := make(Comparators, 0, len(comparators))

    var n *node
    for _, cmp := range comparators {
        n, _ = sl.search(cmp, nil, nil)
        if n != nil && n.Compare(cmp) == 0 {
            result = append(result, n.entry)
        } else {
            result = append(result, nil)
        }
    }

    return result
}

// GetWithPosition will retrieve the value with the provided key and
// return the position of that value within the list. Returns nil, 0
// if an associated value could not be found.
func (sl *SkipList) GetWithPosition(cmp Comparator) (Comparator, uint64) {
    n, pos := sl.search(cmp, nil, nil)
    if n == nil {
        return nil, 0
    }

    return n.entry, pos - 1
}

// ByPosition returns the Comparator at the given position.
func (sl *SkipList) ByPosition(position uint64) Comparator {
    n, _ := sl.searchByPosition(position+1, nil, nil)
    if n == nil {
        return nil
    }

    return n.entry
}

func (sl *SkipList) insert(cmp Comparator) Comparator {
    n, pos := sl.search(cmp, sl.cache, sl.posCache)
    return insertNode(sl, n, cmp, pos, sl.cache, sl.posCache, false)
}

// Insert will insert the provided comparators into the list. Returned
// is a list of comparators that were overwritten. This is expected to
// be an O(log n) operation.
func (sl *SkipList) Insert(comparators ...Comparator) Comparators {
    overwritten := make(Comparators, 0, len(comparators))
    for _, cmp := range comparators {
        overwritten = append(overwritten, sl.insert(cmp))
    }

    return overwritten
}

func (sl *SkipList) insertAtPosition(position uint64, cmp Comparator) {
    if position > sl.Len() {
        position = sl.Len()
    }
    n, pos := sl.searchByPosition(position, sl.cache, sl.posCache)
    insertNode(sl, n, cmp, pos, sl.cache, sl.posCache, true)
}

// InsertAtPosition will insert the provided Comparator at the provided position.
// If position is greater than the length of the skiplist, the Comparator
// is appended. This method bypasses order checks and checks for
// duplicates so use with caution.
func (sl *SkipList) InsertAtPosition(position uint64, cmp Comparator) {
    sl.insertAtPosition(position, cmp)
}

func (sl *SkipList) replaceAtPosition(position uint64, cmp Comparator) {
    n, _ := sl.searchByPosition(position+1, nil, nil)
    if n == nil {
        return
    }

    n.entry = cmp
}

// ReplaceAtPosition will replace the Comparator at the provided position
// with the provided Comparator. If the provided position does not exist,
// this operation is a no-op.
func (sl *SkipList) ReplaceAtPosition(position uint64, cmp Comparator) {
    sl.replaceAtPosition(position, cmp)
}

func (sl *SkipList) delete(cmp Comparator) Comparator {
    n, _ := sl.search(cmp, sl.cache, sl.posCache)

    if n == nil || n.Compare(cmp) != 0 {
        return nil
    }

    atomic.AddUint64(&sl.num, ^uint64(0)) // decrement

    for i := uint8(0); i <= sl.level; i++ {
        if sl.cache[i].forward[i] != n {
            if sl.cache[i].forward[i] != nil {
                sl.cache[i].widths[i]--
            }
            continue
        }

        if i == 0 {
            if n.forward[i] != nil {
                n.forward[i].preNode = sl.cache[i]
            }
            n.preNode = nil
        }

        sl.cache[i].widths[i] += n.widths[i] - 1
        sl.cache[i].forward[i] = n.forward[i]
    }

    for sl.level > 1 && sl.head.forward[sl.level-1] == nil {
        sl.head.widths[sl.level] = 0
        sl.level--
    }

    return n.entry
}

// Delete will remove the provided keys from the skiplist and return
// a list of in-order Comparators that were deleted. This is a no-op if
// an associated key could not be found. This is an O(log n) operation.
func (sl *SkipList) Delete(comparators ...Comparator) Comparators {
    deleted := make(Comparators, 0, len(comparators))

    for _, cmp := range comparators {
        deleted = append(deleted, sl.delete(cmp))
    }

    return deleted
}

// Len returns the number of items in this skiplist.
func (sl *SkipList) Len() uint64 {
    return atomic.LoadUint64(&sl.num)
}

func (sl *SkipList) iterAtPosition(pos uint64) *iterator {
    n, _ := sl.searchByPosition(pos, nil, nil)
    if n == nil || n.entry == nil {
        return nilIterator()
    }

    return &iterator{
        first: true,
        n:     n,
    }
}

// IterAtPosition is the sister method to Iter, only the user defines
// a position in the skiplist to begin iteration instead of a value.
func (sl *SkipList) IterAtPosition(pos uint64) Iterator {
    return sl.iterAtPosition(pos + 1)
}

func (sl *SkipList) iter(cmp Comparator) *iterator {
    n, _ := sl.search(cmp, nil, nil)
    if n == nil {
        return nilIterator()
    }

    return &iterator{
        first: true,
        n:     n,
    }
}

// Iter will return an iterator that can be used to iterate
// over all the values with a key equal to or greater than
// the key provided.
func (sl *SkipList) Iter(cmp Comparator) Iterator {
    return sl.iter(cmp)
}

// SplitAt will split the current skiplist into two lists. The first
// skiplist returned is the "left" list and the second is the "right."
// The index defines the last item in the left list. If index is greater
// than the length of this list, only the left skiplist is returned
// and the right will be nil. This is a mutable operation and modifies
// the content of this list.
func (sl *SkipList) SplitAt(index uint64) (*SkipList, *SkipList) {
    index++ // 0-index offset
    if index >= sl.Len() {
        return sl, nil
    }
    return splitAt(sl, index)
}

// New will allocate, initialize, and return a new skiplist.
// The provided parameter should be of type uint and will determine
// the maximum possible level that will be created to ensure
// a random and quick distribution of levels. Parameter must
// be a uint type.
func New(ifc interface{}) *SkipList {
    sl := &SkipList{}
    sl.init(ifc)
    return sl
}
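Tying the exported API of skip.go together, here is a minimal, self-contained usage sketch (not part of the changeset). It assumes skip.Comparator is the single-method Compare(Comparator) int interface implied by node.Compare, and the Word type is invented purely for illustration.

package main

import (
    "fmt"

    "github.com/duanhf2012/origin/util/algorithms/skip"
)

// Word is an illustrative comparator over strings.
type Word string

func (w Word) Compare(other skip.Comparator) int {
    o := other.(Word)
    switch {
    case w < o:
        return -1
    case w > o:
        return 1
    default:
        return 0
    }
}

func main() {
    sl := skip.New(uint64(0)) // a uint64 universe gives maxLevel = 64

    sl.Insert(Word("cherry"), Word("apple"), Word("banana"))
    fmt.Println(sl.Len()) // 3

    // Get returns entries in the same order as the keys asked for,
    // with nil in place of anything missing.
    fmt.Println(sl.Get(Word("apple"), Word("durian"))) // [apple <nil>]

    // Delete returns what was removed (or nil if absent).
    fmt.Println(sl.Delete(Word("banana"))) // [banana]

    // SplitAt(0) keeps index 0 on the left and moves the rest right.
    left, right := sl.SplitAt(0)
    fmt.Println(left.Len(), right.Len()) // 1 1
}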
@@ -6,10 +6,15 @@ go tool nm ./originserver.exe |grep buildtime

// Pass build-time information in at compile time
go build -ldflags "-X 'github.com/duanhf2012/origin/util/buildtime.BuildTime=20200101'"
go build -ldflags "-X github.com/duanhf2012/origin/util/buildtime.BuildTime=20200101 -X github.com/duanhf2012/origin/util/buildtime.BuildTag=debug"
*/
var BuildTime string

var BuildTag string

func GetBuildDateTime() string {
    return BuildTime
}

func GetBuildTag() string {
    return BuildTag
}
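BuildTime and BuildTag are meant to be injected through the -ldflags commands shown in the comment above. A minimal sketch of reading them back at runtime (the main package below is illustrative, not part of the changeset):

package main

import (
    "fmt"

    "github.com/duanhf2012/origin/util/buildtime"
)

func main() {
    // Empty strings mean the binary was built without the -X flags shown above.
    fmt.Println("built at:", buildtime.GetBuildDateTime())
    fmt.Println("build tag:", buildtime.GetBuildTag())
}

Building with the second go build command from the comment would make this print 20200101 and debug.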
@@ -2,6 +2,7 @@ package coroutine

import (
    "fmt"
    "github.com/duanhf2012/origin/log"
    "reflect"
    "runtime/debug"
)
@@ -12,10 +13,11 @@ func F(callback interface{},recoverNum int, args ...interface{}) {
        var coreInfo string
        coreInfo = string(debug.Stack())
        coreInfo += "\n" + fmt.Sprintf("Core information is %v\n", r)
        fmt.Print(coreInfo)

        if recoverNum == -1 || recoverNum-1 >= 0 {
            log.SError(coreInfo)
            if recoverNum > 0 {
                recoverNum -= 1
            }
            if recoverNum == -1 || recoverNum > 0 {
                go F(callback, recoverNum, args...)
            }
        }
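For context, the hunk above is F's panic handling: on panic it logs the stack via log.SError and, while recoverNum permits (with -1 meaning unlimited), re-launches itself in a new goroutine. A hedged usage sketch follows; the import path, the callback signature handling via reflect, and the go invocation are assumptions based on this hunk rather than on the full file.

package main

import (
    "fmt"
    "time"

    "github.com/duanhf2012/origin/util/coroutine"
)

func worker(name string) {
    fmt.Println("working as", name)
    panic("boom") // exercises the recover path shown above
}

func main() {
    // Assumed usage: run the callback in a goroutine; after a panic the
    // callback is restarted while recoverNum allows (-1 would mean unlimited).
    go coroutine.F(worker, 3, "demo")

    // Give the example goroutine time to run before the program exits.
    time.Sleep(time.Second)
}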
@@ -1,5 +1,7 @@
package math

import "github.com/duanhf2012/origin/log"

type NumberType interface {
    int | int8 | int16 | int32 | int64 | float32 | float64 | uint | uint8 | uint16 | uint32 | uint64
}
@@ -35,3 +37,42 @@ func Abs[NumType SignedNumberType](Num NumType) NumType {

    return Num
}

func Add[NumType NumberType](number1 NumType, number2 NumType) NumType {
    ret := number1 + number2
    if number2 > 0 && ret < number1 {
        log.SStack("Calculation overflow , number1 is ", number1, " number2 is ", number2)
    } else if number2 < 0 && ret > number1 {
        log.SStack("Calculation overflow , number1 is ", number1, " number2 is ", number2)
    }

    return ret
}

func Sub[NumType NumberType](number1 NumType, number2 NumType) NumType {
    ret := number1 - number2
    if number2 > 0 && ret > number1 {
        log.SStack("Calculation overflow , number1 is ", number1, " number2 is ", number2)
    } else if number2 < 0 && ret < number1 {
        log.SStack("Calculation overflow , number1 is ", number1, " number2 is ", number2)
    }

    return ret
}

func Mul[NumType NumberType](number1 NumType, number2 NumType) NumType {
    ret := number1 * number2
    if number1 == 0 || number2 == 0 {
        return ret
    }

    if ret/number2 == number1 {
        return ret
    }

    log.SStack("Calculation overflow , number1 is ", number1, " number2 is ", number2)
    return ret
}
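Add, Sub and Mul are thin overflow-checked wrappers: they always return the wrapped result, but log a stack trace through log.SStack when the arithmetic overflows. A minimal usage sketch (the import path github.com/duanhf2012/origin/util/math is an assumption based on the sibling util packages; the values are illustrative):

package main

import (
    "fmt"
    "math"

    originmath "github.com/duanhf2012/origin/util/math"
)

func main() {
    fmt.Println(originmath.Add(1, 2)) // 3
    fmt.Println(originmath.Mul(6, 7)) // 42

    // Overflows still return the wrapped value, but log.SStack records
    // a "Calculation overflow" stack trace for later diagnosis.
    fmt.Println(originmath.Add(int8(math.MaxInt8), int8(1))) // -128, logged as overflow
    fmt.Println(originmath.Sub(uint8(0), uint8(1)))          // 255, logged as overflow
}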