Compare commits

..

12 Commits

Author SHA1 Message Date
boyce
03f8ba0316 精简事件通知 2024-04-30 17:33:34 +08:00
boyce
277480a7f0 Merge branch 'v2' of https://github.com/duanhf2012/origin into v2 2024-04-29 09:24:04 +08:00
boyce
647a654a36 补充优化说明文档 2024-04-29 09:23:54 +08:00
boyce
de483a88f1 优化停服 2024-04-28 18:02:20 +08:00
boyce
bbbb511b5f 新增kafka模块 2024-04-28 11:21:59 +08:00
boyce
0489ee3ef4 新增gin模块&优化pb处理器 2024-04-28 11:13:46 +08:00
boyce
692dacda0c 删除mongomodule,使用mongodbmodule替代 2024-04-28 10:40:56 +08:00
boyce
7f86b1007d 优化支持自定义logger 2024-04-26 18:13:41 +08:00
boyce
ba5e30ae2e 补充readme说明 2024-04-26 15:07:42 +08:00
boyce
0b1a1d2283 Merge branch 'master' into v2 2024-04-26 14:56:28 +08:00
origin
7780947a96 Merge pull request #878 from hjdtpz/master
导出RankData.RefreshTimestamp
2024-04-26 14:48:47 +08:00
hjdtpz
4c169cf0bb 导出RankData.RefreshTimestamp 2024-04-22 21:47:49 +08:00
20 changed files with 1405 additions and 775 deletions

827
README.md

File diff suppressed because it is too large Load Diff

View File

@@ -54,7 +54,6 @@ type Cluster struct {
discoveryInfo DiscoveryInfo //服务发现配置
rpcMode RpcMode
//masterDiscoveryNodeList []NodeInfo //配置发现Master结点
globalCfg interface{} //全局配置
localServiceCfg map[string]interface{} //map[serviceName]配置数据*
@@ -70,7 +69,6 @@ type Cluster struct {
rpcEventLocker sync.RWMutex //Rpc事件监听保护锁
mapServiceListenRpcEvent map[string]struct{} //ServiceName
mapServiceListenDiscoveryEvent map[string]struct{} //ServiceName
}
func GetCluster() *Cluster {
@@ -228,8 +226,6 @@ func (cls *Cluster) Init(localNodeId string, setupServiceFun SetupServiceFun) er
}
service.RegRpcEventFun = cls.RegRpcEvent
service.UnRegRpcEventFun = cls.UnRegRpcEvent
service.RegDiscoveryServiceEventFun = cls.RegDiscoveryEvent
service.UnRegDiscoveryServiceEventFun = cls.UnReDiscoveryEvent
err = cls.serviceDiscovery.InitDiscovery(localNodeId, cls.serviceDiscoveryDelNode, cls.serviceDiscoverySetNodeInfo)
if err != nil {
@@ -322,23 +318,12 @@ func (cls *Cluster) NotifyAllService(event event.IEvent){
}
func (cls *Cluster) TriggerDiscoveryEvent(bDiscovery bool, nodeId string, serviceName []string) {
cls.rpcEventLocker.Lock()
defer cls.rpcEventLocker.Unlock()
for sName, _ := range cls.mapServiceListenDiscoveryEvent {
ser := service.GetService(sName)
if ser == nil {
log.Error("cannot find service",log.Any("services",serviceName))
continue
}
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
ser.(service.IModule).NotifyEvent(&eventData)
}
var eventData service.DiscoveryServiceEvent
eventData.IsDiscovery = bDiscovery
eventData.NodeId = nodeId
eventData.ServiceName = serviceName
cls.NotifyAllService(&eventData)
}
func (cls *Cluster) GetLocalNodeInfo() *NodeInfo {
@@ -361,25 +346,6 @@ func (cls *Cluster) UnRegRpcEvent(serviceName string) {
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) RegDiscoveryEvent(serviceName string) {
cls.rpcEventLocker.Lock()
if cls.mapServiceListenDiscoveryEvent == nil {
cls.mapServiceListenDiscoveryEvent = map[string]struct{}{}
}
cls.mapServiceListenDiscoveryEvent[serviceName] = struct{}{}
cls.rpcEventLocker.Unlock()
}
func (cls *Cluster) UnReDiscoveryEvent(serviceName string) {
cls.rpcEventLocker.Lock()
delete(cls.mapServiceListenDiscoveryEvent, serviceName)
cls.rpcEventLocker.Unlock()
}
func HasService(nodeId string, serviceName string) bool {
cluster.locker.RLock()
defer cluster.locker.RUnlock()

View File

@@ -236,7 +236,6 @@ func (processor *EventProcessor) castEvent(event IEvent){
eventProcessor,ok := processor.mapListenerEvent[event.GetEventType()]
if ok == false || processor == nil{
log.Debug("event is not listen",log.Int("event type",int(event.GetEventType())))
return
}

View File

@@ -22,7 +22,10 @@ var LogSize int64
var LogChannelCap int
var LogPath string
var LogLevel slog.Level = LevelTrace
var gLogger, _ = NewTextLogger(LevelDebug, "", "",true,LogChannelCap)
var isSetLogger bool
var memPool = bytespool.NewMemAreaPool()
// levels
@@ -37,6 +40,21 @@ const (
LevelFatal = slog.Level(20)
)
type ILogger interface {
Trace(msg string, args ...any)
Debug(msg string, args ...any)
Info(msg string, args ...any)
Warning(msg string, args ...any)
Error(msg string, args ...any)
Stack(msg string, args ...any)
Dump(msg string, args ...any)
Fatal(msg string, args ...any)
DoSPrintf(level slog.Level,a []interface{})
FormatHeader(buf *Buffer,level slog.Level,calldepth int)
Close()
}
type Logger struct {
Slogger *slog.Logger
@@ -47,7 +65,6 @@ type Logger struct {
type IoWriter struct {
outFile io.Writer // destination for output
outConsole io.Writer //os.Stdout
writeBytes int64
logChannel chan []byte
wg sync.WaitGroup
@@ -122,8 +139,8 @@ func (iw *IoWriter) Write(p []byte) (n int, err error){
func (iw *IoWriter) writeIo(p []byte) (n int, err error){
n,err = iw.writeFile(p)
if iw.outConsole != nil {
n,err = iw.outConsole.Write(p)
if OpenConsole {
n,err = os.Stdout.Write(p)
}
return
@@ -217,17 +234,12 @@ func (iw *IoWriter) swichFile() error{
iw.fileDay = now.Day()
iw.fileCreateTime = now.Unix()
atomic.StoreInt64(&iw.writeBytes,0)
if OpenConsole == true {
iw.outConsole = os.Stdout
}
}else{
iw.outConsole = os.Stdout
}
return nil
}
func NewTextLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (*Logger,error){
func NewTextLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (ILogger,error){
var logger Logger
logger.ioWriter.filePath = pathName
logger.ioWriter.fileprefix = filePrefix
@@ -242,7 +254,7 @@ func NewTextLogger(level slog.Level,pathName string,filePrefix string,addSource
return &logger,nil
}
func NewJsonLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (*Logger,error){
func NewJsonLogger(level slog.Level,pathName string,filePrefix string,addSource bool,logChannelCap int) (ILogger,error){
var logger Logger
logger.ioWriter.filePath = pathName
logger.ioWriter.fileprefix = filePrefix
@@ -296,13 +308,18 @@ func (logger *Logger) Fatal(msg string, args ...any) {
os.Exit(1)
}
// It's dangerous to call the method on logging
func Export(logger *Logger) {
if logger != nil {
// It's non-thread-safe
func SetLogger(logger ILogger) {
if logger != nil && isSetLogger == false {
gLogger = logger
isSetLogger = true
}
}
func GetLogger() ILogger{
return gLogger
}
func Trace(msg string, args ...any){
gLogger.Trace(msg, args...)
}
@@ -415,7 +432,7 @@ func Group(key string, args ...any) slog.Attr {
return slog.Group(key, args...)
}
func (logger *Logger) doSPrintf(level slog.Level,a []interface{}) {
func (logger *Logger) DoSPrintf(level slog.Level,a []interface{}) {
if logger.Slogger.Enabled(context.Background(),level) == false{
return
}
@@ -425,7 +442,7 @@ func (logger *Logger) doSPrintf(level slog.Level,a []interface{}) {
logger.sBuff.Reset()
logger.formatHeader(&logger.sBuff,level,3)
logger.FormatHeader(&logger.sBuff,level,3)
for _,s := range a {
logger.sBuff.AppendString(slog.AnyValue(s).String())
@@ -435,46 +452,46 @@ func (logger *Logger) doSPrintf(level slog.Level,a []interface{}) {
}
func (logger *Logger) STrace(a ...interface{}) {
logger.doSPrintf(LevelTrace,a)
logger.DoSPrintf(LevelTrace,a)
}
func (logger *Logger) SDebug(a ...interface{}) {
logger.doSPrintf(LevelDebug,a)
logger.DoSPrintf(LevelDebug,a)
}
func (logger *Logger) SInfo(a ...interface{}) {
logger.doSPrintf(LevelInfo,a)
logger.DoSPrintf(LevelInfo,a)
}
func (logger *Logger) SWarning(a ...interface{}) {
logger.doSPrintf(LevelWarning,a)
logger.DoSPrintf(LevelWarning,a)
}
func (logger *Logger) SError(a ...interface{}) {
logger.doSPrintf(LevelError,a)
logger.DoSPrintf(LevelError,a)
}
func STrace(a ...interface{}) {
gLogger.doSPrintf(LevelTrace,a)
gLogger.DoSPrintf(LevelTrace,a)
}
func SDebug(a ...interface{}) {
gLogger.doSPrintf(LevelDebug,a)
gLogger.DoSPrintf(LevelDebug,a)
}
func SInfo(a ...interface{}) {
gLogger.doSPrintf(LevelInfo,a)
gLogger.DoSPrintf(LevelInfo,a)
}
func SWarning(a ...interface{}) {
gLogger.doSPrintf(LevelWarning,a)
gLogger.DoSPrintf(LevelWarning,a)
}
func SError(a ...interface{}) {
gLogger.doSPrintf(LevelError,a)
gLogger.DoSPrintf(LevelError,a)
}
func (logger *Logger) formatHeader(buf *Buffer,level slog.Level,calldepth int) {
func (logger *Logger) FormatHeader(buf *Buffer,level slog.Level,calldepth int) {
t := time.Now()
var file string
var line int

View File

@@ -10,9 +10,9 @@ type RawMessageInfo struct {
msgHandler RawMessageHandler
}
type RawMessageHandler func(clientId uint64,packType uint16,msg []byte)
type RawConnectHandler func(clientId uint64)
type UnknownRawMessageHandler func(clientId uint64,msg []byte)
type RawMessageHandler func(clientId string,packType uint16,msg []byte)
type RawConnectHandler func(clientId string)
type UnknownRawMessageHandler func(clientId string,msg []byte)
const RawMsgTypeSize = 2
type PBRawProcessor struct {
@@ -38,14 +38,14 @@ func (pbRawProcessor *PBRawProcessor) SetByteOrder(littleEndian bool) {
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId uint64, msg interface{}) error{
func (pbRawProcessor *PBRawProcessor ) MsgRoute(clientId string, msg interface{}) error{
pPackInfo := msg.(*PBRawPackInfo)
pbRawProcessor.msgHandler(clientId,pPackInfo.typ,pPackInfo.rawMsg)
return nil
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId uint64,data []byte) (interface{}, error) {
func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId string,data []byte) (interface{}, error) {
var msgType uint16
if pbRawProcessor.LittleEndian == true {
msgType = binary.LittleEndian.Uint16(data[:2])
@@ -57,7 +57,7 @@ func (pbRawProcessor *PBRawProcessor ) Unmarshal(clientId uint64,data []byte) (i
}
// must goroutine safe
func (pbRawProcessor *PBRawProcessor ) Marshal(clientId uint64,msg interface{}) ([]byte, error){
func (pbRawProcessor *PBRawProcessor ) Marshal(clientId string,msg interface{}) ([]byte, error){
pMsg := msg.(*PBRawPackInfo)
buff := make([]byte, 2, len(pMsg.rawMsg)+RawMsgTypeSize)
@@ -80,7 +80,7 @@ func (pbRawProcessor *PBRawProcessor) MakeRawMsg(msgType uint16,msg []byte,pbRaw
pbRawPackInfo.rawMsg = msg
}
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId uint64,msg interface{}){
func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId string,msg interface{}){
if pbRawProcessor.unknownMessageHandler == nil {
return
}
@@ -88,11 +88,11 @@ func (pbRawProcessor *PBRawProcessor) UnknownMsgRoute(clientId uint64,msg interf
}
// connect event
func (pbRawProcessor *PBRawProcessor) ConnectedRoute(clientId uint64){
func (pbRawProcessor *PBRawProcessor) ConnectedRoute(clientId string){
pbRawProcessor.connectHandler(clientId)
}
func (pbRawProcessor *PBRawProcessor) DisConnectedRoute(clientId uint64){
func (pbRawProcessor *PBRawProcessor) DisConnectedRoute(clientId string){
pbRawProcessor.disconnectHandler(clientId)
}

View File

@@ -117,7 +117,7 @@ func setConfigPath(val interface{}) error {
}
func getRunProcessPid(nodeId string) (int, error) {
f, err := os.OpenFile(fmt.Sprintf("%s_%d.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
f, err := os.OpenFile(fmt.Sprintf("%s_%s.pid", os.Args[0], nodeId), os.O_RDONLY, 0600)
defer f.Close()
if err != nil {
return 0, err
@@ -201,7 +201,7 @@ func initLog() error {
fmt.Printf("cannot create log file!\n")
return err
}
log.Export(logger)
log.SetLogger(logger)
return nil
}

View File

@@ -368,12 +368,12 @@ func (s *Service) UnRegNatsConnListener() {
func (s *Service) RegDiscoverListener(discoveryServiceListener rpc.IDiscoveryServiceListener) {
s.discoveryServiceLister = discoveryServiceListener
s.RegEventReceiverFunc(event.Sys_Event_DiscoverService,s.GetEventHandler(),s.OnDiscoverServiceEvent)
RegDiscoveryServiceEventFun(s.GetName())
RegRpcEventFun(s.GetName())
}
func (s *Service) UnRegDiscoverListener() {
s.UnRegEventReceiverFunc(event.Sys_Event_DiscoverService,s.GetEventHandler())
UnRegDiscoveryServiceEventFun(s.GetName())
UnRegRpcEventFun(s.GetName())
}
func (s *Service) PushRpcRequest(rpcRequest *rpc.RpcRequest) error{

View File

@@ -14,9 +14,6 @@ type RegDiscoveryServiceEventFunType func(serviceName string)
var RegRpcEventFun RegRpcEventFunType
var UnRegRpcEventFun RegRpcEventFunType
var RegDiscoveryServiceEventFun RegDiscoveryServiceEventFunType
var UnRegDiscoveryServiceEventFun RegDiscoveryServiceEventFunType
func init(){
mapServiceName = map[string]IService{}
setupServiceList = []IService{}

View File

@@ -0,0 +1,189 @@
package ginmodule
import (
	"context"
	"log/slog"
	"net"
	"net/http"
	"strings"

	"datacenter/common/processor"
	"github.com/duanhf2012/origin/v2/event"
	"github.com/duanhf2012/origin/v2/log"
	"github.com/duanhf2012/origin/v2/service"
	"github.com/gin-gonic/gin"
)
// GinModule wraps a gin.Engine as an origin service module. HTTP requests
// accepted by the engine are forwarded into the owning service's event
// queue, so route handlers registered via the Safe* methods run on the
// service goroutine rather than on gin's HTTP goroutines.
type GinModule struct {
	service.Module
	*GinConf
	*gin.Engine

	srv       *http.Server
	processor []processor.IGinProcessor // data processors run before the handler chain
}

// GinConf holds the module configuration.
type GinConf struct {
	Addr string // listen address, e.g. ":8080"
}

// Sys_Event_Gin_Event is the event type used to dispatch HTTP requests
// into the service event loop.
const Sys_Event_Gin_Event event.EventType = -11

// Init stores the configuration and an optional pre-built engine
// (OnInit creates a default engine when engine is nil).
func (gm *GinModule) Init(conf *GinConf, engine *gin.Engine) {
	gm.GinConf = conf
	gm.Engine = engine
}

// SetupDataProcessor replaces the data-processor list.
func (gm *GinModule) SetupDataProcessor(processor ...processor.IGinProcessor) {
	gm.processor = processor
}

// AppendDataProcessor appends to the data-processor list.
func (gm *GinModule) AppendDataProcessor(processor ...processor.IGinProcessor) {
	gm.processor = append(gm.processor, processor...)
}

// OnInit builds the engine and HTTP server, installs the access-log and
// recovery middleware, and registers the event receiver that executes
// handler chains on the service goroutine.
func (gm *GinModule) OnInit() error {
	if gm.Engine == nil {
		gm.Engine = gin.Default()
	}

	gm.srv = &http.Server{
		Addr:    gm.Addr,
		Handler: gm.Engine,
	}

	gm.Engine.Use(Logger())
	gm.Engine.Use(gin.Recovery())

	gm.GetEventProcessor().RegEventReceiverFunc(Sys_Event_Gin_Event, gm.GetEventHandler(), gm.eventHandler)
	return nil
}

// eventHandler runs on the service goroutine: it executes the request's
// handler chain, then releases the HTTP goroutine blocked in handleMethod
// by signalling chanWait.
func (gm *GinModule) eventHandler(ev event.IEvent) {
	ginEvent := ev.(*GinEvent)
	for _, handler := range ginEvent.handlersChain {
		handler(ginEvent.c)
	}

	ginEvent.chanWait <- struct{}{}
}
// Start begins serving HTTP on gm.Addr in a background goroutine.
// http.ErrServerClosed — returned by ListenAndServe after a normal
// Shutdown/Close — is not logged as an error (the original logged it).
func (gm *GinModule) Start() {
	log.Info("http start listen", slog.Any("addr", gm.Addr))

	go func() {
		err := gm.srv.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			log.Error("ListenAndServe error", slog.Any("error", err.Error()))
		}
	}()
}
// StartTLS begins serving HTTPS on gm.Addr in a background goroutine.
// A normal shutdown (http.ErrServerClosed) must not abort the process;
// the original called log.Fatal (which exits) even in that case. Any
// other serve error is still fatal, preserving the original intent.
func (gm *GinModule) StartTLS(certFile, keyFile string) {
	log.Info("http start listen", slog.Any("addr", gm.Addr))

	go func() {
		err := gm.srv.ListenAndServeTLS(certFile, keyFile)
		if err != nil && err != http.ErrServerClosed {
			log.Fatal("ListenAndServeTLS error", slog.Any("error", err.Error()))
		}
	}()
}
// Stop gracefully shuts down the HTTP server, waiting for in-flight
// requests until ctx expires.
func (gm *GinModule) Stop(ctx context.Context) {
	if err := gm.srv.Shutdown(ctx); err != nil {
		// CONSISTENCY FIX: the original called log.SError (the sprint-style
		// API) with a structured slog attr, which stringifies the Attr
		// instead of formatting it; use the structured log.Error like the
		// rest of this file.
		log.Error("Server Shutdown", slog.Any("error", err.Error()))
	}
}
// GinEvent carries one HTTP request's handler chain into the service
// event loop; chanWait releases the HTTP goroutine blocked in
// handleMethod once the chain has run.
type GinEvent struct {
	handlersChain gin.HandlersChain
	chanWait      chan struct{}
	c             *gin.Context
}

// GetEventType implements event.IEvent.
func (ge *GinEvent) GetEventType() event.EventType {
	return Sys_Event_Gin_Event
}
// handleMethod registers relativePath on the engine. The installed gin
// handler runs the configured data processors on the HTTP goroutine,
// then packages the user handler chain as a GinEvent, notifies the
// service event loop, and blocks until eventHandler signals completion —
// so user handlers execute on the service goroutine, not the HTTP one.
func (gm *GinModule) handleMethod(httpMethod, relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
	return gm.Engine.Handle(httpMethod, relativePath, func(c *gin.Context) {
		for _, p := range gm.processor {
			_, err := p.Process(c)
			if err != nil {
				// A processor rejected the request: abort without
				// running the handler chain.
				return
			}
		}

		var ev GinEvent
		chanWait := make(chan struct{})
		ev.chanWait = chanWait
		ev.handlersChain = handlers
		ev.c = c
		gm.NotifyEvent(&ev)
		// Wait for the service goroutine to finish the chain before
		// letting gin complete the response.
		<-chanWait
	})
}

// SafeGET registers a GET route whose handlers run on the service goroutine.
func (gm *GinModule) SafeGET(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
	return gm.handleMethod(http.MethodGet, relativePath, handlers...)
}

// SafePOST registers a POST route whose handlers run on the service goroutine.
func (gm *GinModule) SafePOST(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
	return gm.handleMethod(http.MethodPost, relativePath, handlers...)
}

// SafeDELETE registers a DELETE route whose handlers run on the service goroutine.
func (gm *GinModule) SafeDELETE(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
	return gm.handleMethod(http.MethodDelete, relativePath, handlers...)
}

// SafePATCH registers a PATCH route whose handlers run on the service goroutine.
func (gm *GinModule) SafePATCH(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
	return gm.handleMethod(http.MethodPatch, relativePath, handlers...)
}

// SafePut registers a PUT route whose handlers run on the service goroutine.
func (gm *GinModule) SafePut(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes {
	return gm.handleMethod(http.MethodPut, relativePath, handlers...)
}
// GetIPWithProxyHeaders returns the client IP, preferring the X-Real-IP
// header, then the first entry of X-Forwarded-For, then gin's ClientIP.
//
// NOTE(review): these headers are client-controlled; only trust them
// when a fronting proxy is known to overwrite them.
func GetIPWithProxyHeaders(c *gin.Context) string {
	// Try the X-Real-IP header first.
	ip := c.GetHeader("X-Real-IP")

	if ip == "" {
		// FIX: X-Forwarded-For may hold a comma-separated chain
		// ("client, proxy1, proxy2"); the original returned the whole
		// list — take the first (client) entry, trimmed.
		ip = strings.TrimSpace(strings.Split(c.GetHeader("X-Forwarded-For"), ",")[0])
	}

	// Fall back to gin's default ClientIP resolution.
	if ip == "" {
		ip = c.ClientIP()
	}

	return ip
}
// GetIPWithValidatedProxyHeaders returns the client IP taken from the
// proxy headers, but only if it parses as a valid IP; otherwise it falls
// back to gin's ClientIP.
func GetIPWithValidatedProxyHeaders(c *gin.Context) string {
	// BUG FIX: http.Header.Get takes a single header name; the original
	// looked up a literal header called "X-Real-IP,X-Forwarded-For",
	// which never exists. Query the two headers separately.
	proxyHeaders := c.Request.Header.Get("X-Real-IP")
	if proxyHeaders == "" {
		proxyHeaders = c.Request.Header.Get("X-Forwarded-For")
	}

	// X-Forwarded-For may be a comma-separated chain; the first entry
	// is the original client.
	ips := strings.Split(proxyHeaders, ",")
	ip := strings.TrimSpace(ips[0])

	if isValidIP(ip) {
		return ip
	}
	return c.ClientIP()
}
// isValidIP reports whether ip parses as a valid IPv4 or IPv6 address.
// BUG FIX: the original only checked for the empty string and accepted
// any other text ("not-an-ip" passed); use net.ParseIP for real
// validation. Requires the "net" import (see import block).
func isValidIP(ip string) bool {
	return net.ParseIP(ip) != nil
}

View File

@@ -0,0 +1,79 @@
package ginmodule
import (
"fmt"
"github.com/duanhf2012/origin/log"
"github.com/gin-gonic/gin"
"time"
)
// Logger is a custom gin access-log middleware: it times the request,
// then writes one colorized debug line with client IP, method, URI,
// protocol, latency, user-agent and referer.
func Logger() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Record the start time, run the remaining handlers, then log.
		startTime := time.Now()

		c.Next()

		// End time and elapsed latency.
		endTime := time.Now()

		latencyTime := endTime.Sub(startTime)

		// Client IP.
		clientIP := c.ClientIP()
		// remoteIP := c.RemoteIP()

		// Request method.
		reqMethod := c.Request.Method

		// Request URI.
		reqUri := c.Request.RequestURI

		// Request protocol.
		reqProto := c.Request.Proto

		// Referer.
		repReferer := c.Request.Referer()

		// User agent.
		reqUA := c.Request.UserAgent()

		// Response body length (negative means nothing written yet).
		// NOTE(review): resLength is computed but not included in the
		// log line below — confirm whether it should be.
		resLength := c.Writer.Size()
		if resLength < 0 {
			resLength = 0
		}

		// Response status code.
		statusCode := c.Writer.Status()

		log.SDebug(fmt.Sprintf(
			"%s | %3d | %s %10s | \033[44;37m%-6s\033[0m %s %s | %10v | \"%s\" \"%s\"",
			colorForStatus(statusCode),
			statusCode,
			colorForStatus(0),
			clientIP,
			// remoteIP,
			reqMethod,
			reqUri,
			reqProto,
			latencyTime,
			reqUA,
			repReferer,
		))
	}
}
// colorForStatus maps an HTTP status code to an ANSI terminal escape:
// 2xx green, 3xx blue, 4xx yellow, 0 reset, anything else red.
func colorForStatus(code int) string {
	if code == 0 {
		return "\033[0m" // cancel
	}
	if code >= 200 && code < 300 {
		return "\033[42;1;37m" // green
	}
	if code >= 300 && code < 400 {
		return "\033[34m" // blue
	}
	if code >= 400 && code < 500 {
		return "\033[33m" // yellow
	}
	return "\033[31m" // red
}

View File

@@ -0,0 +1,65 @@
package kafkamodule
import "github.com/IBM/sarama"
// KafkaAdmin wraps sarama.ClusterAdmin together with a cached snapshot
// of the cluster's topics, taken at Setup and refreshable via
// RefreshTopic.
type KafkaAdmin struct {
	sarama.ClusterAdmin

	mapTopic map[string]sarama.TopicDetail // topic name -> detail (cached snapshot)
}

// Setup parses the broker protocol version, connects the cluster admin,
// and caches the current topic list. If the topic list cannot be read,
// the connection is closed before returning the error.
func (ka *KafkaAdmin) Setup(kafkaVersion string, addrs []string) error {
	config := sarama.NewConfig()

	var err error
	config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
	if err != nil {
		return err
	}

	ka.ClusterAdmin, err = sarama.NewClusterAdmin(addrs, config)
	if err != nil {
		return err
	}

	ka.mapTopic, err = ka.GetTopics()
	if err != nil {
		ka.ClusterAdmin.Close()
		return err
	}

	return nil
}

// RefreshTopic re-reads the topic list from the cluster into the cache.
func (ka *KafkaAdmin) RefreshTopic() error {
	var err error
	ka.mapTopic, err = ka.GetTopics()
	return err
}

// HasTopic reports whether topic exists in the cached snapshot
// (call RefreshTopic first for an up-to-date answer).
func (ka *KafkaAdmin) HasTopic(topic string) bool {
	_, ok := ka.mapTopic[topic]
	return ok
}

// GetTopicDetail returns the cached detail for topic, or nil if unknown.
func (ka *KafkaAdmin) GetTopicDetail(topic string) *sarama.TopicDetail {
	topicDetail, ok := ka.mapTopic[topic]
	if ok == false {
		return nil
	}

	return &topicDetail
}

// GetTopics fetches the live topic list from the cluster (not the cache).
func (ka *KafkaAdmin) GetTopics() (map[string]sarama.TopicDetail, error) {
	return ka.ListTopics()
}

// CreateTopic creates a topic.
// numPartitions:     partition count
// replicationFactor: replica count
// validateOnly:      only validate the request without actually creating
func (ka *KafkaAdmin) CreateTopic(topic string, numPartitions int32, replicationFactor int16, validateOnly bool) error {
	return ka.ClusterAdmin.CreateTopic(topic, &sarama.TopicDetail{NumPartitions: numPartitions, ReplicationFactor: replicationFactor}, validateOnly)
}

View File

@@ -0,0 +1,289 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"sync"
"time"
)
// ConsumerGroup wraps a sarama consumer group with lifecycle management:
// Setup starts a background consume loop; Close cancels it, closes the
// underlying group and waits for the loop to exit.
type ConsumerGroup struct {
	sarama.ConsumerGroup

	waitGroup sync.WaitGroup     // tracks the consume goroutine
	chanExit  chan error         // readiness / startup-error signal (buffered, cap 1)
	ready     chan bool          // NOTE(review): appears unused in this file — confirm
	cancel    context.CancelFunc // stops the consume loop
	groupId   string
}
// NewConsumerConfig builds a sarama consumer-group configuration.
//
// kafkaVersion:   broker protocol version, e.g. "3.3.1".
// assignor:       partition assignment strategy — "sticky", "roundrobin"
//                 or "range".
// offsetsInitial: initial offset when no committed offset exists
//                 (sarama.OffsetOldest / sarama.OffsetNewest).
//
// Auto-commit is disabled: offsets are committed explicitly by the
// consumer-group handler after batches have been delivered.
func NewConsumerConfig(kafkaVersion string, assignor string, offsetsInitial int64) (*sarama.Config, error) {
	config := sarama.NewConfig()

	version, err := sarama.ParseKafkaVersion(kafkaVersion)
	if err != nil {
		// FIX: fail fast — the original only checked this error after
		// the assignor switch below.
		return nil, err
	}
	config.Version = version

	config.Consumer.Offsets.Initial = offsetsInitial
	config.Consumer.Offsets.AutoCommit.Enable = false

	switch assignor {
	case "sticky":
		// Sticky round-robin: after a rebalance, existing assignments
		// are kept as far as possible and only the excess partitions
		// move to the new consumer (per-consumer counts stay within 1).
		config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()}
	case "roundrobin":
		// Round-robin: partitions are dealt out one at a time across
		// all consumers and topics.
		config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()}
	case "range":
		// Range (sarama default): each topic's partitions are split
		// into contiguous ranges, one range per consumer.
		config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()}
	default:
		// Error strings are lowercase per Go convention (was capitalized).
		return nil, fmt.Errorf("unrecognized consumer group partition assignor: %s", assignor)
	}

	return config, nil
}
// IMsgReceiver receives batches of consumed messages. Returning false
// makes the handler redeliver the same batch (retry loop in MsgData.flush).
type IMsgReceiver interface {
	Receiver(msgs []*sarama.ConsumerMessage) bool
}
// Setup connects to the brokers, starts the consume loop in a background
// goroutine, and blocks until either the group is ready (the handler's
// first Setup ran) or the first consume attempt failed.
func (c *ConsumerGroup) Setup(addr []string, topics []string, groupId string, config *sarama.Config, receiverInterval time.Duration, maxReceiverNum int, msgReceiver IMsgReceiver) error {
	var err error
	c.ConsumerGroup, err = sarama.NewConsumerGroup(addr, groupId, config)
	if err != nil {
		// BUG FIX: the original returned nil here, silently swallowing
		// the connection error.
		return err
	}

	c.groupId = groupId
	c.chanExit = make(chan error, 1)

	var handler ConsumerGroupHandler
	handler.receiver = msgReceiver
	handler.maxReceiverNum = maxReceiverNum
	handler.receiverInterval = receiverInterval
	handler.chanExit = c.chanExit

	var ctx context.Context
	ctx, c.cancel = context.WithCancel(context.Background())

	c.waitGroup.Add(1)
	go func() {
		defer c.waitGroup.Done()
		for {
			// Use a goroutine-local error: the original wrote the outer
			// `err` from this goroutine while Setup read it — a data race.
			cerr := c.Consume(ctx, topics, &handler)
			if cerr != nil {
				// Consume errors (e.g. handler setup failure) land here.
				log.Error("Error from consumer", log.Any("err", cerr))
				// BUG FIX: report the failure so the receive below cannot
				// deadlock when Consume fails before the handler ever
				// signalled readiness (chanExit is buffered, cap 1).
				c.chanExit <- cerr
				return
			}
			// Consume returns nil on rebalance or context cancellation.
			if ctx.Err() != nil {
				log.Info("consumer stop", log.Any("info", ctx.Err()))
			}
			c.chanExit <- cerr
		}
	}()

	// Block until ready (nil) or startup failed (non-nil).
	err = <-c.chanExit
	return err
}
// Close stops the consumer: cancel the consume context, close the
// underlying group (which unblocks Consume), then wait for the consume
// goroutine to exit.
func (c *ConsumerGroup) Close() {
	log.Info("close consumerGroup")
	// 1. cancel the consume context
	c.cancel()

	// 2. close the underlying connection
	err := c.ConsumerGroup.Close()
	if err != nil {
		log.Error("close consumerGroup fail", log.Any("err", err.Error()))
	}

	// 3. wait for the consume goroutine to finish
	c.waitGroup.Wait()
}
// ConsumerGroupHandler implements sarama.ConsumerGroupHandler. It batches
// consumed messages per topic and hands them to an IMsgReceiver,
// committing offsets only after the receiver has accepted a batch
// (at-least-once delivery).
type ConsumerGroupHandler struct {
	receiver         IMsgReceiver
	receiverInterval time.Duration // max time a partial batch may wait before flush
	maxReceiverNum   int           // batch size that triggers an immediate flush
	//mapTopicOffset map[string]map[int32]int //map[topic]map[partitionId]offsetInfo
	mapTopicData map[string]*MsgData // per-topic pending batch
	mx           sync.Mutex          // guards mapTopicData

	chanExit    chan error
	isRebalance bool // set after the first Setup; readiness is signalled only once
	//stopSig *int32
}

// MsgData is the pending batch for one topic.
type MsgData struct {
	sync.Mutex
	msg                []*sarama.ConsumerMessage
	mapPartitionOffset map[int32]int64 // highest offset seen per partition
}

// Flush flushes the pending batch of one topic, or of every topic when
// topic is "".
// NOTE(review): the range over ch.mapTopicData is not protected by ch.mx
// while GetMsgData may insert concurrently — confirm this is only called
// from the claim goroutines as intended.
func (ch *ConsumerGroupHandler) Flush(session sarama.ConsumerGroupSession, topic string) {
	if topic != "" {
		msgData := ch.GetMsgData(topic)
		msgData.flush(session, ch.receiver, topic)
		return
	}

	for tp, msgData := range ch.mapTopicData {
		msgData.flush(session, ch.receiver, tp)
	}
}

// GetMsgData returns (lazily creating) the batch holder for topic.
func (ch *ConsumerGroupHandler) GetMsgData(topic string) *MsgData {
	ch.mx.Lock()
	defer ch.mx.Unlock()

	msgData := ch.mapTopicData[topic]
	if msgData == nil {
		msgData = &MsgData{}
		msgData.msg = make([]*sarama.ConsumerMessage, 0, ch.maxReceiverNum)
		ch.mapTopicData[topic] = msgData
	}

	return msgData
}

// flush delivers the pending batch to the receiver (retrying until it is
// accepted), marks and commits the per-partition offsets, then resets
// the batch in place.
func (md *MsgData) flush(session sarama.ConsumerGroupSession, receiver IMsgReceiver, topic string) {
	if len(md.msg) == 0 {
		return
	}

	// Hand the batch to the receiver; retry until accepted.
	// NOTE(review): a receiver that keeps returning false turns this
	// into a busy loop with no backoff — confirm intended.
	for {
		ok := receiver.Receiver(md.msg)
		if ok == true {
			break
		}
	}

	// Mark one past the highest delivered offset per partition, then commit.
	for pId, offset := range md.mapPartitionOffset {
		session.MarkOffset(topic, pId, offset+1, "")
		log.Info(fmt.Sprintf("topic %s,pid %d,offset %d", topic, pId, offset+1))
	}
	session.Commit()
	//log.Info("commit")
	//time.Sleep(1000 * time.Second)

	// Reset the batch, keeping slice capacity and map buckets.
	md.msg = md.msg[:0]
	clear(md.mapPartitionOffset)
}

// appendMsg records msg in the batch and flushes once the batch reaches
// maxReceiverNum.
func (md *MsgData) appendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage, receiver IMsgReceiver, maxReceiverNum int) {
	md.Lock()
	defer md.Unlock()

	// Offsets within a partition only grow, so keeping the latest is enough.
	if md.mapPartitionOffset == nil {
		md.mapPartitionOffset = make(map[int32]int64, 10)
	}
	md.mapPartitionOffset[msg.Partition] = msg.Offset
	md.msg = append(md.msg, msg)
	if len(md.msg) < maxReceiverNum {
		return
	}

	md.flush(session, receiver, msg.Topic)
}

// AppendMsg adds msg to its topic's batch.
func (ch *ConsumerGroupHandler) AppendMsg(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
	dataMsg := ch.GetMsgData(msg.Topic)
	dataMsg.appendMsg(session, msg, ch.receiver, ch.maxReceiverNum)
}

// Setup is called by sarama at the start of every session. The first
// call signals ConsumerGroup.Setup (via chanExit) that the group is
// ready; later calls are rebalances and stay silent.
func (ch *ConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error {
	ch.mapTopicData = make(map[string]*MsgData, 128)
	if ch.isRebalance == false {
		ch.chanExit <- nil
	}
	ch.isRebalance = true
	return nil
}

// Cleanup flushes every topic's pending batch before the session ends.
func (ch *ConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error {
	ch.Flush(session, "")
	return nil
}
// ConsumeClaim consumes one partition claim: messages are batched via
// AppendMsg and flushed when the batch fills, on a periodic ticker, or
// when the session ends.
func (ch *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	ticker := time.NewTicker(ch.receiverInterval)
	// FIX: stop the ticker when the claim ends — the original leaked it.
	defer ticker.Stop()

	for {
		select {
		case msg := <-claim.Messages():
			if msg == nil {
				// Messages() is closed on rebalance/session end and then
				// yields nil.
				log.SWarning("claim will exit", log.Any("topic", claim.Topic()), log.Any("Partition", claim.Partition()))
				return nil
			}
			ch.AppendMsg(session, msg)
		case <-ticker.C:
			// Periodic flush so partial batches don't wait forever.
			ch.Flush(session, claim.Topic())
		case <-session.Context().Done():
			return nil
		}
	}
}
/*
阿里云参数说明https://sls.aliyun.com/doc/oscompatibledemo/sarama_go_kafka_consume.html
conf.Net.TLS.Enable = true
conf.Net.SASL.Enable = true
conf.Net.SASL.User = project
conf.Net.SASL.Password = fmt.Sprintf("%s#%s", accessId, accessKey)
conf.Net.SASL.Mechanism = "PLAIN"
conf.Net.TLS.Enable = true
conf.Net.SASL.Enable = true
conf.Net.SASL.User = project
conf.Net.SASL.Password = fmt.Sprintf("%s#%s", accessId, accessKey)
conf.Net.SASL.Mechanism = "PLAIN"
conf.Consumer.Fetch.Min = 1
conf.Consumer.Fetch.Default = 1024 * 1024
conf.Consumer.Retry.Backoff = 2 * time.Second
conf.Consumer.MaxWaitTime = 250 * time.Millisecond
conf.Consumer.MaxProcessingTime = 100 * time.Millisecond
conf.Consumer.Return.Errors = false
conf.Consumer.Offsets.AutoCommit.Enable = true
conf.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
conf.Consumer.Offsets.Initial = sarama.OffsetOldest
conf.Consumer.Offsets.Retry.Max = 3
*/

View File

@@ -0,0 +1,146 @@
package kafkamodule
import (
"context"
"github.com/IBM/sarama"
"github.com/duanhf2012/origin/v2/log"
"github.com/duanhf2012/origin/v2/service"
"time"
)
// IProducer is a placeholder producer abstraction (currently empty).
type IProducer interface {
}

// SyncProducer is a placeholder marker type (currently unused).
type SyncProducer struct {
}

// AsyncProducer is a placeholder marker type (currently unused).
type AsyncProducer struct {
}

// Producer is a service module wrapping both a sarama synchronous and
// asynchronous producer; only the one that was Setup is non-nil.
type Producer struct {
	service.Module

	sarama.SyncProducer
	sarama.AsyncProducer
}

// NewProducerConfig builds a producer config.
// kafkaVersion: broker protocol version.
// returnErr / returnSucc: whether errors/successes are reported back on
// the producer channels (the async feedback loop relies on both).
// requiredAcks: -1 WaitForAll — all in-sync replicas ack (strongest);
// 1 WaitForLocal — leader only (default); 0 NoResponse — fire-and-forget
// (highest throughput).
// Idempotent: guarantee messages are written exactly once and in order;
// requires MaxOpenRequests == 1, which is set below.
// partitioner: selects the target partition; hash-of-key by default.
func NewProducerConfig(kafkaVersion string, returnErr bool, returnSucc bool, requiredAcks sarama.RequiredAcks, Idempotent bool,
	partitioner sarama.PartitionerConstructor) (*sarama.Config, error) {
	config := sarama.NewConfig()
	var err error
	config.Version, err = sarama.ParseKafkaVersion(kafkaVersion)
	if err != nil {
		return nil, err
	}

	config.Producer.Return.Errors = returnErr
	config.Producer.Return.Successes = returnSucc
	config.Producer.RequiredAcks = requiredAcks
	config.Producer.Partitioner = partitioner
	config.Producer.Timeout = 10 * time.Second
	config.Producer.Idempotent = Idempotent
	if Idempotent == true {
		// Idempotent production requires at most one in-flight request.
		config.Net.MaxOpenRequests = 1
	}

	return config, nil
}
// SyncSetup creates the synchronous producer for the given brokers.
func (p *Producer) SyncSetup(addr []string, config *sarama.Config) error {
	var err error
	p.SyncProducer, err = sarama.NewSyncProducer(addr, config)
	return err
}
// ASyncSetup creates the asynchronous producer for the given brokers and
// starts the feedback-pump goroutine (asyncRun).
func (p *Producer) ASyncSetup(addr []string, config *sarama.Config) error {
	producer, err := sarama.NewAsyncProducer(addr, config)
	if err != nil {
		return err
	}
	p.AsyncProducer = producer

	go p.asyncRun()
	return nil
}
// asyncRun pumps the async producer's feedback channels, routing each
// success or error back to the AsyncReturn attached as message Metadata
// (set by AsyncSendMessage; AsyncPushMessage leaves Metadata nil and is
// skipped). It exits once both channels are closed.
func (p *Producer) asyncRun() {
	succCh := p.Successes()
	errCh := p.Errors()

	// BUG FIX: after Close(), sarama closes both channels; the original
	// loop then received nil messages forever and panicked dereferencing
	// sm.Metadata / em.Msg. Detect closure and stop selecting on a
	// closed channel by nil-ing it out.
	for succCh != nil || errCh != nil {
		select {
		case sm, ok := <-succCh:
			if !ok {
				succCh = nil
				break
			}
			if sm.Metadata == nil {
				break
			}
			asyncReturn := sm.Metadata.(*AsyncReturn)
			asyncReturn.chanReturn <- asyncReturn
		case em, ok := <-errCh:
			if !ok {
				errCh = nil
				break
			}
			log.Error("async kafkamodule error", log.ErrorAttr("err", em.Err))
			if em.Msg.Metadata == nil {
				break
			}
			asyncReturn := em.Msg.Metadata.(*AsyncReturn)
			asyncReturn.Err = em.Err
			asyncReturn.chanReturn <- asyncReturn
		}
	}
}
// AsyncReturn carries the outcome of one asynchronously produced message.
type AsyncReturn struct {
	Msg        *sarama.ProducerMessage
	Err        error
	chanReturn chan *AsyncReturn
}

// WaitOk blocks until the broker acknowledges the message (or reports an
// error), or until ctx is cancelled.
func (ar *AsyncReturn) WaitOk(ctx context.Context) (*sarama.ProducerMessage, error) {
	// The Metadata attached in AsyncSendMessage is this AsyncReturn.
	asyncReturn := ar.Msg.Metadata.(*AsyncReturn)

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-asyncReturn.chanReturn:
		return asyncReturn.Msg, asyncReturn.Err
	}
}
// AsyncSendMessage queues msg on the async producer and returns an
// AsyncReturn whose WaitOk blocks until the broker acks. The AsyncReturn
// is attached as msg.Metadata so asyncRun can route the result back.
func (p *Producer) AsyncSendMessage(msg *sarama.ProducerMessage) *AsyncReturn {
	asyncReturn := AsyncReturn{Msg: msg, chanReturn: make(chan *AsyncReturn, 1)}
	msg.Metadata = &asyncReturn

	p.AsyncProducer.Input() <- msg
	return &asyncReturn
}

// AsyncPushMessage queues msg fire-and-forget (no Metadata, so asyncRun
// ignores its acknowledgement).
func (p *Producer) AsyncPushMessage(msg *sarama.ProducerMessage) {
	p.AsyncProducer.Input() <- msg
}
// Close shuts down whichever producers were set up and clears the
// fields. FIX: close errors were silently discarded; log them instead.
func (p *Producer) Close() {
	if p.SyncProducer != nil {
		if err := p.SyncProducer.Close(); err != nil {
			log.Error("close sync producer fail", log.ErrorAttr("err", err))
		}
		p.SyncProducer = nil
	}

	if p.AsyncProducer != nil {
		if err := p.AsyncProducer.Close(); err != nil {
			log.Error("close async producer fail", log.ErrorAttr("err", err))
		}
		p.AsyncProducer = nil
	}
}
// SendMessage sends one message synchronously, returning the partition
// and offset it was written to.
func (p *Producer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
	return p.SyncProducer.SendMessage(msg)
}

// SendMessages sends a batch of messages synchronously.
func (p *Producer) SendMessages(msgs []*sarama.ProducerMessage) error {
	return p.SyncProducer.SendMessages(msgs)
}

View File

@@ -0,0 +1,151 @@
package kafkamodule
import (
"context"
"fmt"
"github.com/IBM/sarama"
"testing"
"time"
)
// 对各参数和机制名称的说明https://blog.csdn.net/u013311345/article/details/129217728
type MsgReceiver struct {
t *testing.T
}
func (mr *MsgReceiver) Receiver(msgs []*sarama.ConsumerMessage) bool {
for _, m := range msgs {
mr.t.Logf("time:%s, topic:%s, partition:%d, offset:%d, key:%s, value:%s", time.Now().Format("2006-01-02 15:04:05.000"), m.Topic, m.Partition, m.Offset, m.Key, string(m.Value))
}
return true
}
var addr = []string{"192.168.13.24:9092", "192.168.13.24:9093", "192.168.13.24:9094", "192.168.13.24:9095"}
var topicName = []string{"test_topic_1", "test_topic_2"}
var kafkaVersion = "3.3.1"
func producer(t *testing.T) {
var admin KafkaAdmin
err := admin.Setup(kafkaVersion, addr)
if err != nil {
t.Fatal(err)
}
for _, tName := range topicName {
if admin.HasTopic(tName) == false {
err = admin.CreateTopic(tName, 2, 2, false)
t.Log(err)
}
}
var pd Producer
cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
if err != nil {
t.Fatal(err)
}
err = pd.SyncSetup(addr, cfg)
if err != nil {
t.Fatal(err)
}
now := time.Now()
//msgs := make([]*sarama.ProducerMessage, 0, 20000)
for i := 0; i < 20000; i++ {
var msg sarama.ProducerMessage
msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
msg.Topic = topicName[0]
msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
pd.SendMessage(&msg)
//msgs = append(msgs, &msg)
}
//err = pd.SendMessages(msgs)
//t.Log(err)
t.Log(time.Now().Sub(now).Milliseconds())
pd.Close()
}
// producer_async exercises the asynchronous Kafka producer: it ensures the
// test topics exist, fires 200000 keyed messages at topicName[0], waits for
// every acknowledgement, and logs the elapsed time in milliseconds.
// NOTE(review): requires a reachable Kafka cluster at the addresses in addr.
func producer_async(t *testing.T) {
	var admin KafkaAdmin
	err := admin.Setup(kafkaVersion, addr)
	if err != nil {
		t.Fatal(err)
	}
	// Create any missing test topic with 10 partitions, replication factor 2.
	for _, tName := range topicName {
		if !admin.HasTopic(tName) {
			err = admin.CreateTopic(tName, 10, 2, false)
			t.Log(err)
		}
	}

	var pd Producer
	cfg, err := NewProducerConfig(kafkaVersion, true, true, sarama.WaitForAll, false, sarama.NewHashPartitioner)
	if err != nil {
		t.Fatal(err)
	}
	err = pd.ASyncSetup(addr, cfg)
	if err != nil {
		t.Fatal(err)
	}

	// Fixed: the slice capacity previously lagged the loop count (20000 vs 200000).
	const msgCount = 200000
	now := time.Now()
	pending := make([]*AsyncReturn, 0, msgCount)
	for i := 0; i < msgCount; i++ {
		var msg sarama.ProducerMessage
		msg.Key = sarama.StringEncoder(fmt.Sprintf("%d", i))
		msg.Topic = topicName[0]
		msg.Value = sarama.StringEncoder(fmt.Sprintf("i'm %d", i))
		pending = append(pending, pd.AsyncSendMessage(&msg))
	}
	// Wait for every broker acknowledgement before taking the measurement.
	for _, r := range pending {
		r.WaitOk(context.Background())
	}
	t.Log(time.Since(now).Milliseconds())
	// Fixed: removed the leftover 1000-second debug sleep that stalled the run.
	pd.Close()
}
// consumer exercises the consumer-group API: it ensures the test topics
// exist, joins group "test_groupId" with the sticky partition strategy, and
// logs incoming messages through MsgReceiver for manual observation.
// NOTE(review): requires a reachable Kafka cluster at the addresses in addr.
func consumer(t *testing.T) {
	var admin KafkaAdmin
	err := admin.Setup(kafkaVersion, addr)
	if err != nil {
		t.Fatal(err)
	}
	// Create any missing test topic with 10 partitions, replication factor 2.
	for _, tName := range topicName {
		if !admin.HasTopic(tName) {
			err = admin.CreateTopic(tName, 10, 2, false)
			t.Log(err)
		}
	}

	var cg ConsumerGroup
	cfg, err := NewConsumerConfig(kafkaVersion, "sticky", sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	err = cg.Setup(addr, topicName, "test_groupId", cfg, 50*time.Second, 10, &MsgReceiver{t: t})
	t.Log(err)
	// Keep consuming for a long while; this is a manual observation test.
	time.Sleep(10000 * time.Second)
	cg.Close()
}
// TestConsumerAndProducer is the manual integration entry point. Swap the
// commented calls below to exercise the sync producer or the consumer
// instead of the async producer.
// NOTE(review): requires a reachable Kafka cluster at the addresses in addr.
func TestConsumerAndProducer(t *testing.T) {
producer_async(t)
//go producer(t)
//producer(t)
//consumer(t)
}

View File

@@ -0,0 +1,7 @@
package kafkamodule
// Sasl holds SASL authentication settings for connecting to Kafka brokers.
type Sasl struct {
UserName string `json:"UserName"` // SASL user name
Passwd string `json:"Passwd"` // SASL password
InstanceId string `json:"InstanceId"` // presumably a hosted-Kafka instance id — confirm against the broker config
}

View File

@@ -1,181 +0,0 @@
package mongomodule
import (
"github.com/duanhf2012/origin/v2/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"sync"
"time"
"container/heap"
_ "gopkg.in/mgo.v2"
)
// Session wraps an *mgo.Session with the bookkeeping DialContext needs to
// manage it inside a reference-count min-heap.
type Session struct {
*mgo.Session
ref int // number of current holders; the heap ordering key
index int // this session's position in DialContext.sessions
}
// SessionHeap is a min-heap of sessions ordered by reference count, so the
// root is always the least-used session.
type SessionHeap []*Session

// Len reports the number of pooled sessions.
func (h SessionHeap) Len() int {
	return len(h)
}

// Less orders sessions by ascending reference count.
func (h SessionHeap) Less(a, b int) bool {
	return h[a].ref < h[b].ref
}

// Swap exchanges two sessions and keeps their cached heap indices in sync.
func (h SessionHeap) Swap(a, b int) {
	h[a], h[b] = h[b], h[a]
	h[a].index = a
	h[b].index = b
}

// Push appends a session, recording its heap index.
func (h *SessionHeap) Push(x interface{}) {
	sess := x.(*Session)
	sess.index = len(*h)
	*h = append(*h, sess)
}

// Pop removes and returns the last session, marking it as outside the heap.
func (h *SessionHeap) Pop() interface{} {
	old := *h
	last := len(old) - 1
	sess := old[last]
	sess.index = -1
	*h = old[:last]
	return sess
}
// DialContext is a goroutine-safe pool of MongoDB sessions kept as a
// min-heap ordered by reference count.
type DialContext struct {
sync.Mutex
sessions SessionHeap // heap root is always the least-referenced session
}
// MongoModule is a thin facade over DialContext offering Init/Ref/UnRef.
type MongoModule struct {
dailContext *DialContext // session pool (field keeps the original "dail" spelling)
}
// Init dials url and prepares a pool of sessionNum sessions. dialTimeout
// bounds the initial connection; timeout applies to later sync/socket waits.
func (slf *MongoModule) Init(url string, sessionNum uint32, dialTimeout time.Duration, timeout time.Duration) error {
	pool, err := dialWithTimeout(url, sessionNum, dialTimeout, timeout)
	slf.dailContext = pool
	return err
}
// Ref borrows the least-referenced session from the pool; pair with UnRef.
func (slf *MongoModule) Ref() *Session{
return slf.dailContext.Ref()
}
// UnRef returns a session previously obtained with Ref back to the pool.
func (slf *MongoModule) UnRef(s *Session) {
slf.dailContext.UnRef(s)
}
// dialWithTimeout dials the MongoDB server at url and builds a pool of
// sessionNum sessions organised as a min-heap on reference count.
// goroutine safe
func dialWithTimeout(url string, sessionNum uint32, dialTimeout time.Duration, timeout time.Duration) (*DialContext, error) {
	// Fixed: sessionNum is unsigned, so the old "<= 0" test was misleading;
	// only zero is invalid.
	if sessionNum == 0 {
		sessionNum = 100
		log.Release("invalid sessionNum, reset to %v", sessionNum)
	}

	s, err := mgo.DialWithTimeout(url, dialTimeout)
	if err != nil {
		return nil, err
	}
	s.SetMode(mgo.Strong, true)
	s.SetSyncTimeout(timeout)
	s.SetSocketTimeout(timeout)

	// Build the pool: slot 0 is the dialed session, the rest are copies
	// created via New that share its configuration.
	c := new(DialContext)
	c.sessions = make(SessionHeap, sessionNum)
	c.sessions[0] = &Session{s, 0, 0}
	for i := 1; i < int(sessionNum); i++ {
		c.sessions[i] = &Session{s.New(), 0, i}
	}
	heap.Init(&c.sessions)
	return c, nil
}
// Close shuts down every pooled session.
// goroutine safe
func (c *DialContext) Close() {
	c.Lock()
	defer c.Unlock()
	for _, sess := range c.sessions {
		sess.Close()
	}
}
// Ref hands out the session with the fewest holders (the heap root),
// refreshing it when it transitions from idle to in-use, and bumps its
// reference count before restoring heap order.
// goroutine safe
func (c *DialContext) Ref() *Session {
	c.Lock()
	defer c.Unlock()

	least := c.sessions[0]
	if least.ref == 0 {
		// Re-establish a clean socket for a session that was fully idle.
		least.Refresh()
	}
	least.ref++
	heap.Fix(&c.sessions, 0)
	return least
}
// UnRef returns a borrowed session to the pool (nil is a no-op) and
// restores heap order for its slot.
// goroutine safe
func (c *DialContext) UnRef(s *Session) {
	if s == nil {
		return
	}
	c.Lock()
	defer c.Unlock()
	s.ref--
	heap.Fix(&c.sessions, s.index)
}
// EnsureCounter creates the auto-increment counter document with _id=id and
// seq=0 if it does not exist yet; an already-existing counter is not an error.
// goroutine safe
func (s *Session) EnsureCounter(db string, collection string, id string) error {
	err := s.DB(db).C(collection).Insert(bson.M{
		"_id": id,
		"seq": 0,
	})
	// A duplicate-key error means the counter already exists — that is fine.
	if mgo.IsDup(err) {
		return nil
	}
	return err
}
// NextSeq atomically increments the counter document identified by id and
// returns the new sequence value.
// goroutine safe
func (s *Session) NextSeq(db string, collection string, id string) (int, error) {
	var doc struct {
		Seq int
	}
	change := mgo.Change{
		Update:    bson.M{"$inc": bson.M{"seq": 1}},
		ReturnNew: true,
	}
	_, err := s.DB(db).C(collection).FindId(id).Apply(change, &doc)
	return doc.Seq, err
}
// EnsureIndex creates a non-unique, sparse index over key; bBackground
// controls whether the server builds it in the background.
// goroutine safe
func (s *Session) EnsureIndex(db string, collection string, key []string, bBackground bool) error {
	idx := mgo.Index{
		Key:        key,
		Unique:     false,
		Sparse:     true,
		Background: bBackground,
	}
	return s.DB(db).C(collection).EnsureIndex(idx)
}
// EnsureUniqueIndex creates a unique, sparse index over key; bBackground
// controls whether the server builds it in the background.
// goroutine safe
func (s *Session) EnsureUniqueIndex(db string, collection string, key []string, bBackground bool) error {
	idx := mgo.Index{
		Key:        key,
		Unique:     true,
		Sparse:     true,
		Background: bBackground,
	}
	return s.DB(db).C(collection).EnsureIndex(idx)
}

View File

@@ -1,95 +0,0 @@
package mongomodule
import (
"fmt"
_ "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"testing"
"time"
)
// Student is the sample document stored in the t_student collection.
// Fixed: the bson tags contained a space after the colon (`bson: "name"`),
// which makes reflect.StructTag.Get("bson") return "" and the tags be ignored.
type Student struct {
	ID     bson.ObjectId `bson:"_id"`
	Name   string        `bson:"name"`
	Age    int           `bson:"age"`
	Sid    string        `bson:"sid"`
	Status int           `bson:"status"`
}
type StudentName struct {
Name string `bson: "name"`
}
// Test_Example is a manual example of the MongoModule API: insert, find,
// update, delete, auto-increment sequences, $setOnInsert upserts and partial
// updates against a live MongoDB instance.
// NOTE(review): requires a reachable server at 192.168.2.119.
func Test_Example(t *testing.T) {
	module := MongoModule{}
	// Fixed: the Init error was silently discarded.
	if err := module.Init("mongodb://admin:123456@192.168.2.119:27017", 100, 5*time.Second, 5*time.Second); err != nil {
		t.Fatal(err)
	}

	// Borrow a session from the pool and give it back when done.
	// Fixed: the module exposes Ref/UnRef — there is no Take method.
	s := module.Ref()
	defer module.UnRef(s)

	c := s.DB("test2").C("t_student")

	// 1. Define the sample documents.
	insertData := Student{
		ID:     bson.NewObjectId(),
		Name:   "seeta11",
		Age:    35, //*^_^*
		Sid:    "s20180907",
		Status: 1,
	}
	updateData := Student{
		Name:   "seeta11",
		Age:    18,
		Sid:    "s20180907",
		Status: 1,
	}

	// 2. Insert.
	if err := c.Insert(&insertData); err != nil {
		t.Log(err)
	}

	// 3. Find one document by id.
	selector := bson.M{"_id": bson.ObjectIdHex("5f25303e999c622d361989b0")}
	m := Student{}
	if err := c.Find(selector).One(&m); err != nil {
		t.Log(err)
	}

	// 4. Update by id.
	updateData.ID = bson.ObjectIdHex("5f25303e999c622d361989b0")
	if err := c.UpdateId(bson.ObjectIdHex("5f25303e999c622d361989b0"), &updateData); err != nil {
		fmt.Print(err)
	}

	// 5. Delete by id.
	if err := c.RemoveId(bson.ObjectIdHex("5f252f09999c622d36198951")); err != nil {
		fmt.Print(err)
	}

	// 6. Auto-increment sequence.
	s.EnsureCounter("test2", "t_student", "5f252f09999c622d36198951")
	for i := 0; i < 3; i++ {
		id, err := s.NextSeq("test2", "t_student", "5f252f09999c622d36198951")
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(id)
	}

	// 7. $setOnInsert: only applied when the upsert inserts a new document.
	info, uErr := c.Upsert(bson.M{"_id": bson.ObjectIdHex("5f252f09999c622d36198951")}, bson.M{
		"$setOnInsert": bson.M{"Name": "setoninsert", "Age": 55}})

	// 8. Partial update of selected numeric fields.
	selector1 := bson.M{"_id": bson.ObjectIdHex("60473de655f1012e7453b369")}
	update1 := bson.M{"$set": bson.M{"name": "xxxxx", "age": 1111}}
	c.Update(selector1, update1)
	fmt.Println(info, uErr)
}

View File

@@ -357,7 +357,7 @@ func (mp *MongoPersist) removeRankData(rankId uint64,keys []uint64) bool {
func (mp *MongoPersist) upsertToDB(collectName string,rankData *RankData) error{
condition := bson.D{{"_id", rankData.Key}}
upsert := bson.M{"_id":rankData.Key,"RefreshTime": rankData.refreshTimestamp, "SortData": rankData.SortData, "Data": rankData.Data,"ExData":rankData.ExData}
upsert := bson.M{"_id":rankData.Key,"RefreshTime": rankData.RefreshTimestamp, "SortData": rankData.SortData, "Data": rankData.Data,"ExData":rankData.ExData}
update := bson.M{"$set": upsert}
s := mp.mongo.TakeSession()

View File

@@ -19,7 +19,7 @@ type RankData struct {
Data []byte
ExData []int64
refreshTimestamp int64 //刷新时间
RefreshTimestamp int64 //刷新时间
//bRelease bool
ref bool
compareFunc func(other skip.Comparator) int
@@ -39,7 +39,7 @@ func NewRankData(isDec bool, data *rpc.RankData,refreshTimestamp int64) *RankDat
ret.ExData = append(ret.ExData,d.InitValue+d.IncreaseValue)
}
ret.refreshTimestamp = refreshTimestamp
ret.RefreshTimestamp = refreshTimestamp
return ret
}

View File

@@ -250,7 +250,7 @@ func (rs *RankSkip) UpsetRank(upsetData *rpc.RankData, refreshTimestamp int64, f
//找到的情况对比排名数据是否有变化,无变化进行data更新,有变化则进行删除更新
if compareIsEqual(rankNode.SortData, upsetData.SortData) {
rankNode.Data = upsetData.GetData()
rankNode.refreshTimestamp = refreshTimestamp
rankNode.RefreshTimestamp = refreshTimestamp
if fromLoad == false {
rs.rankModule.OnChangeRankData(rs, rankNode)